diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/alpha/kernel/process.c linux.21pre4-ac2/arch/alpha/kernel/process.c --- linux.21pre4/arch/alpha/kernel/process.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/alpha/kernel/process.c 2003-01-29 17:10:04.000000000 +0000 @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -74,9 +75,6 @@ cpu_idle(void) { /* An endless idle loop with no priority at all. */ - current->nice = 20; - current->counter = -100; - while (1) { /* FIXME -- EV6 and LCA45 know how to power down the CPU. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/alpha/kernel/smp.c linux.21pre4-ac2/arch/alpha/kernel/smp.c --- linux.21pre4/arch/alpha/kernel/smp.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/alpha/kernel/smp.c 2003-01-29 17:10:04.000000000 +0000 @@ -81,6 +81,7 @@ int smp_num_probed; /* Internal processor count */ int smp_num_cpus = 1; /* Number that came online. */ int smp_threads_ready; /* True once the per process idle is forked. */ +unsigned long cache_decay_ticks; int __cpu_number_map[NR_CPUS]; int __cpu_logical_map[NR_CPUS]; @@ -155,11 +156,6 @@ { int cpuid = hard_smp_processor_id(); - if (current != init_tasks[cpu_number_map(cpuid)]) { - printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n", - cpuid, current, init_tasks[cpu_number_map(cpuid)]); - } - DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state)); /* Turn on machine checks. */ @@ -217,9 +213,6 @@ DBGS(("smp_callin: commencing CPU %d current %p\n", cpuid, current)); - /* Setup the scheduler for this processor. */ - init_idle(); - /* ??? This should be in init_idle. */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; @@ -449,14 +442,11 @@ if (idle == &init_task) panic("idle process is init_task for CPU %d", cpuid); - idle->processor = cpuid; - idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */ + init_idle(idle, cpuid); + unhash_process(idle); + __cpu_logical_map[cpunum] = cpuid; __cpu_number_map[cpuid] = cpunum; - - del_from_runqueue(idle); - unhash_process(idle); - init_tasks[cpunum] = idle; DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n", cpuid, idle->state, idle->flags)); @@ -563,13 +553,10 @@ __cpu_number_map[boot_cpuid] = 0; __cpu_logical_map[0] = boot_cpuid; - current->processor = boot_cpuid; smp_store_cpu_info(boot_cpuid); smp_setup_percpu_timer(boot_cpuid); - init_idle(); - /* ??? This should be in init_idle. 
*/ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/alpha/mm/fault.c linux.21pre4-ac2/arch/alpha/mm/fault.c --- linux.21pre4/arch/alpha/mm/fault.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/alpha/mm/fault.c 2003-01-06 19:10:18.000000000 +0000 @@ -122,8 +122,6 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/arm/config.in linux.21pre4-ac2/arch/arm/config.in --- linux.21pre4/arch/arm/config.in 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/arm/config.in 2003-01-06 15:41:35.000000000 +0000 @@ -646,6 +646,7 @@ bool 'Kernel debugging' CONFIG_DEBUG_KERNEL dep_bool ' Debug memory allocations' CONFIG_DEBUG_SLAB $CONFIG_DEBUG_KERNEL dep_bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ $CONFIG_DEBUG_KERNEL +dep_bool ' Morse code panics' CONFIG_PANIC_MORSE $CONFIG_DEBUG_KERNEL $CONFIG_PC_KEYB dep_bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK $CONFIG_DEBUG_KERNEL dep_bool ' Wait queue debugging' CONFIG_DEBUG_WAITQ $CONFIG_DEBUG_KERNEL dep_bool ' Verbose BUG() reporting (adds 70K)' CONFIG_DEBUG_BUGVERBOSE $CONFIG_DEBUG_KERNEL diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/arm/mm/fault-common.c linux.21pre4-ac2/arch/arm/mm/fault-common.c --- linux.21pre4/arch/arm/mm/fault-common.c 2003-01-29 16:27:15.000000000 +0000 +++ linux.21pre4-ac2/arch/arm/mm/fault-common.c 2003-01-06 19:10:18.000000000 +0000 @@ -229,7 +229,7 @@ goto survive; check_stack: - if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) + if (!expand_stack(vma, addr)) goto good_area; out: return fault; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/boot/setup.S linux.21pre4-ac2/arch/i386/boot/setup.S --- linux.21pre4/arch/i386/boot/setup.S 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/boot/setup.S 2003-01-08 15:33:39.000000000 +0000 @@ -45,6 +45,10 @@ * New A20 code ported from SYSLINUX by H. Peter Anvin. AMD Elan bugfixes * by Robert Schwebel, December 2001 * + * BIOS Enhanced Disk Drive support + * by Matt Domsch October 2002 + * conformant to T13 Committee www.t13.org + * projects 1572D, 1484D, 1386D, 1226DT */ #include @@ -53,6 +57,7 @@ #include #include #include +#include #include /* Signature words to ensure LILO loaded us right */ @@ -543,6 +548,70 @@ done_apm_bios: #endif +#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) +# Do the BIOS Enhanced Disk Drive calls +# This consists of two calls: +# int 13h ah=41h "Check Extensions Present" +# int 13h ah=48h "Get Device Parameters" +# +# A buffer of size EDDMAXNR*(EDDEXTSIZE+EDDPARMSIZE) is reserved for our use +# in the empty_zero_page at EDDBUF. The first four bytes of which are +# used to store the device number, interface support map and version +# results from fn41. The following 74 bytes are used to store +# the results from fn48. Starting from device 80h, fn41, then fn48 +# are called and their results stored in EDDBUF+n*(EDDEXTSIZE+EDDPARMIZE). +# Then the pointer is incremented to store the data for the next call. +# This repeats until either a device doesn't exist, or until EDDMAXNR +# devices have been stored. +# The one tricky part is that ds:si always points four bytes into +# the structure, and the fn41 results are stored at offsets +# from there. 
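The record layout described in the comments above can be pictured in C. A minimal sketch, assuming only what the comments state (a 4-byte fn41 header followed by a 74-byte fn48 result per device); the kernel's real definition is struct edd_info in edd.h, and the names below are placeholders:

	#include <string.h>

	/*
	 * Illustrative only -- one record in the EDDBUF array filled in by the
	 * probing loop.  Field names are placeholders, not the edd.h names.
	 */
	struct edd_record_sketch {
		unsigned char  device;             /* %dl passed to fn41          */
		unsigned char  version;            /* %ah returned by fn41        */
		unsigned short interface_support;  /* %cx returned by fn41        */
		unsigned char  fn48_params[74];    /* EDDPARMSIZE bytes from fn48 */
	} __attribute__((packed));                 /* 4 + 74 bytes per device     */

	/* Conceptually, copy_edd() in setup.c does no more than this: */
	static void copy_edd_sketch(const void *eddbuf, unsigned char eddnr_byte,
				    struct edd_record_sketch *dst, unsigned char *nr)
	{
		*nr = eddnr_byte;              /* devices probed, capped at EDDMAXNR */
		memcpy(dst, eddbuf, *nr * sizeof(*dst));
	}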
This removes the need to increment the pointer for +# every store, and leaves it ready for the fn48 call. +# A second one-byte buffer, EDDNR, in the empty_zero_page stores +# the number of BIOS devices which exist, up to EDDMAXNR. +# In setup.c, copy_edd() stores both empty_zero_page buffers away +# for later use, as they would get overwritten otherwise. +# This code is sensitive to the size of the structs in edd.h +edd_start: + # %ds points to the bootsector + # result buffer for fn48 + movw $EDDBUF+EDDEXTSIZE, %si # in ds:si, fn41 results + # kept just before that + movb $0, (EDDNR) # zero value at EDDNR + movb $0x80, %dl # BIOS device 0x80 + +edd_check_ext: + movb $CHECKEXTENSIONSPRESENT, %ah # Function 41 + movw $EDDMAGIC1, %bx # magic + int $0x13 # make the call + jc edd_done # no more BIOS devices + + cmpw $EDDMAGIC2, %bx # is magic right? + jne edd_next # nope, next... + + movb %dl, %ds:-4(%si) # store device number + movb %ah, %ds:-3(%si) # store version + movw %cx, %ds:-2(%si) # store extensions + incb (EDDNR) # note that we stored something + +edd_get_device_params: + movw $EDDPARMSIZE, %ds:(%si) # put size + movb $GETDEVICEPARAMETERS, %ah # Function 48 + int $0x13 # make the call + # Don't check for fail return + # it doesn't matter. + movw %si, %ax # increment si + addw $EDDPARMSIZE+EDDEXTSIZE, %ax + movw %ax, %si + +edd_next: + incb %dl # increment to next device + cmpb $EDDMAXNR, (EDDNR) # Out of space? + jb edd_check_ext # keep looping + +edd_done: +#endif + # Now we want to move to protected mode ... cmpw $0, %cs:realmode_swtch jz rmodeswtch_normal diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/config.in linux.21pre4-ac2/arch/i386/config.in --- linux.21pre4/arch/i386/config.in 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/config.in 2003-01-29 17:11:02.000000000 +0000 @@ -185,6 +185,23 @@ bool 'Machine Check Exception' CONFIG_X86_MCE +mainmenu_option next_comment +comment 'CPU Frequency scaling' +dep_bool 'CPU Frequency scaling (EXPERIMENTAL)' CONFIG_CPU_FREQ $CONFIG_EXPERIMENTAL +if [ "$CONFIG_CPU_FREQ" = "y" ]; then + bool ' /proc/sys/cpu/ interface (2.4. / OLD)' CONFIG_CPU_FREQ_24_API + tristate ' AMD Mobile K6-2/K6-3 PowerNow!' 
CONFIG_X86_POWERNOW_K6 + if [ "$CONFIG_MELAN" = "y" ]; then + tristate ' AMD Elan' CONFIG_ELAN_CPUFREQ + fi + tristate ' VIA Cyrix III Longhaul' CONFIG_X86_LONGHAUL + tristate ' Intel Speedstep' CONFIG_X86_SPEEDSTEP + tristate ' Intel Pentium 4 clock modulation' CONFIG_X86_P4_CLOCKMOD + tristate ' Transmeta LongRun' CONFIG_X86_LONGRUN + tristate ' Cyrix MediaGX/NatSemi Geode Suspend Modulation' CONFIG_X86_GX_SUSPMOD +fi +endmenu + tristate 'Toshiba Laptop support' CONFIG_TOSHIBA tristate 'Dell laptop support' CONFIG_I8K @@ -192,6 +209,10 @@ tristate '/dev/cpu/*/msr - Model-specific register support' CONFIG_X86_MSR tristate '/dev/cpu/*/cpuid - CPU information support' CONFIG_X86_CPUID +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + tristate 'BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)' CONFIG_EDD +fi + choice 'High Memory Support' \ "off CONFIG_NOHIGHMEM \ 4GB CONFIG_HIGHMEM4G \ @@ -283,6 +304,8 @@ bool 'ISA bus support' CONFIG_ISA fi +tristate 'NatSemi SCx200 support' CONFIG_SCx200 + source drivers/pci/Config.in bool 'EISA support' CONFIG_EISA @@ -315,6 +338,8 @@ tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC +bool 'Kernel .config support' CONFIG_IKCONFIG + bool 'Power Management support' CONFIG_PM if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then @@ -465,12 +490,13 @@ bool 'Kernel debugging' CONFIG_DEBUG_KERNEL if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then bool ' Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW + bool ' Compile the kernel with frame pointers' CONFIG_FRAME_POINTER bool ' Debug high memory support' CONFIG_DEBUG_HIGHMEM bool ' Debug memory allocations' CONFIG_DEBUG_SLAB bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ + bool ' Morse code panics' CONFIG_PANIC_MORSE bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK - bool ' Compile the kernel with frame pointers' CONFIG_FRAME_POINTER fi endmenu diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/defconfig linux.21pre4-ac2/arch/i386/defconfig --- linux.21pre4/arch/i386/defconfig 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/defconfig 2003-01-08 15:33:39.000000000 +0000 @@ -56,6 +56,7 @@ # CONFIG_MICROCODE is not set # CONFIG_X86_MSR is not set # CONFIG_X86_CPUID is not set +# CONFIG_EDD is not set CONFIG_NOHIGHMEM=y # CONFIG_HIGHMEM4G is not set # CONFIG_HIGHMEM64G is not set diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/dmi_scan.c linux.21pre4-ac2/arch/i386/kernel/dmi_scan.c --- linux.21pre4/arch/i386/kernel/dmi_scan.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/dmi_scan.c 2003-01-29 17:11:37.000000000 +0000 @@ -427,17 +427,6 @@ } /* - * ASUS K7V-RM has broken ACPI table defining sleep modes - */ - -static __init int broken_acpi_Sx(struct dmi_blacklist *d) -{ - printk(KERN_WARNING "Detected ASUS mainboard with broken ACPI sleep table\n"); - dmi_broken |= BROKEN_ACPI_Sx; - return 0; -} - -/* * Toshiba keyboard likes to repeat keys when they are not repeated. 
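New dmi_scan.c quirks follow the same shape as the entries touched in this hunk: an __init callback that records the workaround, plus a blacklist entry built from MATCH()/NO_MATCH. A sketch with a deliberately hypothetical board name (not part of this patch):

	/* Hypothetical example of a dmi_scan.c quirk, mirroring the pattern above. */
	static __init int broken_example_board(struct dmi_blacklist *d)
	{
		printk(KERN_WARNING "Detected Example Mainboard: applying workaround\n");
		/* a real callback would typically set a flag here, as the
		 * removed broken_acpi_Sx() did with dmi_broken */
		return 0;
	}

	/* ...and the matching table entry: */
		{ broken_example_board, "Example Mainboard", {	/* hypothetical */
			MATCH(DMI_BIOS_VERSION, "Example BIOS 1.00"),
			MATCH(DMI_BOARD_NAME, "EX-1000"),
			NO_MATCH, NO_MATCH
			} },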
*/ @@ -764,12 +753,6 @@ NO_MATCH, NO_MATCH, NO_MATCH } }, - { broken_acpi_Sx, "ASUS K7V-RM", { /* Bad ACPI Sx table */ - MATCH(DMI_BIOS_VERSION,"ASUS K7V-RM ACPI BIOS Revision 1003A"), - MATCH(DMI_BOARD_NAME, ""), - NO_MATCH, NO_MATCH - } }, - { broken_toshiba_keyboard, "Toshiba Satellite 4030cdt", { /* Keyboard generates spurious repeats */ MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"), NO_MATCH, NO_MATCH, NO_MATCH diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/edd.c linux.21pre4-ac2/arch/i386/kernel/edd.c --- linux.21pre4/arch/i386/kernel/edd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/edd.c 2003-01-28 16:15:24.000000000 +0000 @@ -0,0 +1,672 @@ +/* + * linux/arch/i386/kernel/edd.c + * Copyright (C) 2002 Dell Computer Corporation + * by Matt Domsch + * + * BIOS Enhanced Disk Drive Services (EDD) + * conformant to T13 Committee www.t13.org + * projects 1572D, 1484D, 1386D, 1226DT + * + * This code takes information provided by BIOS EDD calls + * fn41 - Check Extensions Present and + * fn48 - Get Device Parametes with EDD extensions + * made in setup.S, copied to safe structures in setup.c, + * and presents it in /proc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * TODO: + * - move edd.[ch] to better locations if/when one is decided + * - keep current with 2.5 EDD code changes + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Matt Domsch "); +MODULE_DESCRIPTION("proc interface to BIOS EDD information"); +MODULE_LICENSE("GPL"); + +#define EDD_VERSION "0.09 2003-Jan-22" +#define EDD_DEVICE_NAME_SIZE 16 +#define REPORT_URL "http://domsch.com/linux/edd30/results.html" + +#define left (count - (p - page) - 1) + +static struct proc_dir_entry *bios_dir; + +struct attr_entry { + struct proc_dir_entry *entry; + struct list_head node; +}; + +struct edd_device { + char name[EDD_DEVICE_NAME_SIZE]; + struct edd_info *info; + struct proc_dir_entry *dir; + struct list_head attr_list; +}; + +static struct edd_device *edd_devices[EDDMAXNR]; + +struct edd_attribute { + char *name; + int (*show)(char *page, char **start, off_t off, + int count, int *eof, void *data); + int (*test) (struct edd_device * edev); +}; + +#define EDD_DEVICE_ATTR(_name,_show,_test) \ +struct edd_attribute edd_attr_##_name = { \ + .name = __stringify(_name), \ + .show = _show, \ + .test = _test, \ +}; + +static inline struct edd_info * +edd_dev_get_info(struct edd_device *edev) +{ + return edev->info; +} + +static inline void +edd_dev_set_info(struct edd_device *edev, struct edd_info *info) +{ + edev->info = info; +} + +static int +proc_calc_metrics(char *page, char **start, off_t off, + int count, int *eof, int len) +{ + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + return len; +} + +static int +edd_dump_raw_data(char *b, int count, void *data, int length) +{ + char *orig_b = b; + char hexbuf[80], ascbuf[20], *h, *a, c; + unsigned char *p = data; + unsigned long column = 0; + int length_printed = 
0, d; + const char maxcolumn = 16; + while (length_printed < length && count > 0) { + h = hexbuf; + a = ascbuf; + for (column = 0; + column < maxcolumn && length_printed < length; column++) { + h += sprintf(h, "%02x ", (unsigned char) *p); + if (!isprint(*p)) + c = '.'; + else + c = *p; + a += sprintf(a, "%c", c); + p++; + length_printed++; + } + /* pad out the line */ + for (; column < maxcolumn; column++) { + h += sprintf(h, " "); + a += sprintf(a, " "); + } + d = snprintf(b, count, "%s\t%s\n", hexbuf, ascbuf); + b += d; + count -= d; + } + return (b - orig_b); +} + +static int +edd_show_host_bus(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + int i; + + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + for (i = 0; i < 4; i++) { + if (isprint(info->params.host_bus_type[i])) { + p += snprintf(p, left, "%c", info->params.host_bus_type[i]); + } else { + p += snprintf(p, left, " "); + } + } + + if (!strncmp(info->params.host_bus_type, "ISA", 3)) { + p += snprintf(p, left, "\tbase_address: %x\n", + info->params.interface_path.isa.base_address); + } else if (!strncmp(info->params.host_bus_type, "PCIX", 4) || + !strncmp(info->params.host_bus_type, "PCI", 3)) { + p += snprintf(p, left, + "\t%02x:%02x.%d channel: %u\n", + info->params.interface_path.pci.bus, + info->params.interface_path.pci.slot, + info->params.interface_path.pci.function, + info->params.interface_path.pci.channel); + } else if (!strncmp(info->params.host_bus_type, "IBND", 4) || + !strncmp(info->params.host_bus_type, "XPRS", 4) || + !strncmp(info->params.host_bus_type, "HTPT", 4)) { + p += snprintf(p, left, + "\tTBD: %llx\n", + info->params.interface_path.ibnd.reserved); + + } else { + p += snprintf(p, left, "\tunknown: %llx\n", + info->params.interface_path.unknown.reserved); + } + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_interface(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + int i; + + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + for (i = 0; i < 8; i++) { + if (isprint(info->params.interface_type[i])) { + p += snprintf(p, left, "%c", info->params.interface_type[i]); + } else { + p += snprintf(p, left, " "); + } + } + if (!strncmp(info->params.interface_type, "ATAPI", 5)) { + p += snprintf(p, left, "\tdevice: %u lun: %u\n", + info->params.device_path.atapi.device, + info->params.device_path.atapi.lun); + } else if (!strncmp(info->params.interface_type, "ATA", 3)) { + p += snprintf(p, left, "\tdevice: %u\n", + info->params.device_path.ata.device); + } else if (!strncmp(info->params.interface_type, "SCSI", 4)) { + p += snprintf(p, left, "\tid: %u lun: %llu\n", + info->params.device_path.scsi.id, + info->params.device_path.scsi.lun); + } else if (!strncmp(info->params.interface_type, "USB", 3)) { + p += snprintf(p, left, "\tserial_number: %llx\n", + info->params.device_path.usb.serial_number); + } else if (!strncmp(info->params.interface_type, "1394", 4)) { + p += snprintf(p, left, "\teui: %llx\n", + info->params.device_path.i1394.eui); + } else if (!strncmp(info->params.interface_type, "FIBRE", 5)) { + p += snprintf(p, left, "\twwid: %llx lun: %llx\n", + info->params.device_path.fibre.wwid, + info->params.device_path.fibre.lun); + } else if (!strncmp(info->params.interface_type, "I2O", 3)) { + p += snprintf(p, left, 
"\tidentity_tag: %llx\n", + info->params.device_path.i2o.identity_tag); + } else if (!strncmp(info->params.interface_type, "RAID", 4)) { + p += snprintf(p, left, "\tidentity_tag: %x\n", + info->params.device_path.raid.array_number); + } else if (!strncmp(info->params.interface_type, "SATA", 4)) { + p += snprintf(p, left, "\tdevice: %u\n", + info->params.device_path.sata.device); + } else { + p += snprintf(p, left, "\tunknown: %llx %llx\n", + info->params.device_path.unknown.reserved1, + info->params.device_path.unknown.reserved2); + } + + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +/** + * edd_show_raw_data() - unparses EDD information, returned to user-space + * + * Returns: number of bytes written, or 0 on failure + */ +static int +edd_show_raw_data(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + int i, warn_padding = 0, nonzero_path = 0, + len = sizeof (*info) - 4; + uint8_t checksum = 0, c = 0; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) + len = info->params.length; + + p += snprintf(p, left, "int13 fn48 returned data:\n\n"); + p += edd_dump_raw_data(p, left, ((char *) info) + 4, len); + + /* Spec violation. Adaptec AIC7899 returns 0xDDBE + here, when it should be 0xBEDD. + */ + p += snprintf(p, left, "\n"); + if (info->params.key == 0xDDBE) { + p += snprintf(p, left, + "Warning: Spec violation. Key should be 0xBEDD, is 0xDDBE\n"); + } + + if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) { + goto out; + } + + for (i = 30; i <= 73; i++) { + c = *(((uint8_t *) info) + i + 4); + if (c) + nonzero_path++; + checksum += c; + } + + if (checksum) { + p += snprintf(p, left, + "Warning: Spec violation. Device Path checksum invalid.\n"); + } + + if (!nonzero_path) { + p += snprintf(p, left, "Error: Spec violation. Empty device path.\n"); + goto out; + } + + for (i = 0; i < 4; i++) { + if (!isprint(info->params.host_bus_type[i])) { + warn_padding++; + } + } + for (i = 0; i < 8; i++) { + if (!isprint(info->params.interface_type[i])) { + warn_padding++; + } + } + + if (warn_padding) { + p += snprintf(p, left, + "Warning: Spec violation. Padding should be 0x20.\n"); + } + +out: + p += snprintf(p, left, "\nPlease check %s\n", REPORT_URL); + p += snprintf(p, left, "to see if this device has been reported. 
If not,\n"); + p += snprintf(p, left, "please send the information requested there.\n"); + + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_version(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%02x\n", info->version); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_extensions(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) { + p += snprintf(p, left, "Fixed disk access\n"); + } + if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) { + p += snprintf(p, left, "Device locking and ejecting\n"); + } + if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) { + p += snprintf(p, left, "Enhanced Disk Drive support\n"); + } + if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) { + p += snprintf(p, left, "64-bit extensions\n"); + } + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_info_flags(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + if (info->params.info_flags & EDD_INFO_DMA_BOUNDRY_ERROR_TRANSPARENT) + p += snprintf(p, left, "DMA boundry error transparent\n"); + if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID) + p += snprintf(p, left, "geometry valid\n"); + if (info->params.info_flags & EDD_INFO_REMOVABLE) + p += snprintf(p, left, "removable\n"); + if (info->params.info_flags & EDD_INFO_WRITE_VERIFY) + p += snprintf(p, left, "write verify\n"); + if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION) + p += snprintf(p, left, "media change notification\n"); + if (info->params.info_flags & EDD_INFO_LOCKABLE) + p += snprintf(p, left, "lockable\n"); + if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT) + p += snprintf(p, left, "no media present\n"); + if (info->params.info_flags & EDD_INFO_USE_INT13_FN50) + p += snprintf(p, left, "use int13 fn50\n"); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_default_cylinders(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%x\n", info->params.num_default_cylinders); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_default_heads(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%x\n", info->params.num_default_heads); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_default_sectors_per_track(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || 
off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%x\n", info->params.sectors_per_track); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_show_sectors(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct edd_info *info = data; + char *p = page; + if (!info || !page || off) { + return proc_calc_metrics(page, start, off, count, eof, 0); + } + + p += snprintf(p, left, "0x%llx\n", info->params.number_of_sectors); + return proc_calc_metrics(page, start, off, count, eof, (p - page)); +} + +static int +edd_has_default_cylinders(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + if (!edev || !info) + return 0; + return info->params.num_default_cylinders > 0; +} + +static int +edd_has_default_heads(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + if (!edev || !info) + return 0; + return info->params.num_default_heads > 0; +} + +static int +edd_has_default_sectors_per_track(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + if (!edev || !info) + return 0; + return info->params.sectors_per_track > 0; +} + +static int +edd_has_edd30(struct edd_device *edev) +{ + struct edd_info *info = edd_dev_get_info(edev); + int i, nonzero_path = 0; + char c; + + if (!edev || !info) + return 0; + + if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) { + return 0; + } + + for (i = 30; i <= 73; i++) { + c = *(((uint8_t *) info) + i + 4); + if (c) { + nonzero_path++; + break; + } + } + if (!nonzero_path) { + return 0; + } + + return 1; +} + +static EDD_DEVICE_ATTR(raw_data, edd_show_raw_data, NULL); +static EDD_DEVICE_ATTR(version, edd_show_version, NULL); +static EDD_DEVICE_ATTR(extensions, edd_show_extensions, NULL); +static EDD_DEVICE_ATTR(info_flags, edd_show_info_flags, NULL); +static EDD_DEVICE_ATTR(sectors, edd_show_sectors, NULL); +static EDD_DEVICE_ATTR(default_cylinders, edd_show_default_cylinders, + edd_has_default_cylinders); +static EDD_DEVICE_ATTR(default_heads, edd_show_default_heads, + edd_has_default_heads); +static EDD_DEVICE_ATTR(default_sectors_per_track, + edd_show_default_sectors_per_track, + edd_has_default_sectors_per_track); +static EDD_DEVICE_ATTR(interface, edd_show_interface,edd_has_edd30); +static EDD_DEVICE_ATTR(host_bus, edd_show_host_bus, edd_has_edd30); + +static struct edd_attribute *def_attrs[] = { + &edd_attr_raw_data, + &edd_attr_version, + &edd_attr_extensions, + &edd_attr_info_flags, + &edd_attr_sectors, + &edd_attr_default_cylinders, + &edd_attr_default_heads, + &edd_attr_default_sectors_per_track, + &edd_attr_interface, + &edd_attr_host_bus, + NULL, +}; + +static inline void +edd_device_unregister(struct edd_device *edev) +{ + struct list_head *pos, *next; + struct attr_entry *ae; + + list_for_each_safe(pos, next, &edev->attr_list) { + ae = list_entry(pos, struct attr_entry, node); + remove_proc_entry(ae->entry->name, edev->dir); + list_del(&ae->node); + kfree(ae); + } + + remove_proc_entry(edev->dir->name, bios_dir); +} + +static int +edd_populate_dir(struct edd_device *edev) +{ + struct edd_attribute *attr; + struct attr_entry *ae; + int i; + int error = 0; + + for (i = 0; (attr=def_attrs[i]); i++) { + if (!attr->test || (attr->test && attr->test(edev))) { + ae = kmalloc(sizeof (*ae), GFP_KERNEL); + if (ae == NULL) { + error = 1; + break; + } + INIT_LIST_HEAD(&ae->node); + ae->entry = + create_proc_read_entry(attr->name, 0444, + edev->dir, attr->show, + 
edd_dev_get_info(edev)); + if (ae->entry == NULL) { + error = 1; + break; + } + list_add(&ae->node, &edev->attr_list); + } + } + + if (error) + return error; + + return 0; +} + +static int +edd_make_dir(struct edd_device *edev) +{ + int error=1; + + edev->dir = proc_mkdir(edev->name, bios_dir); + if (edev->dir != NULL) { + edev->dir->mode = (S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); + error = edd_populate_dir(edev); + } + return error; +} + +static int +edd_device_register(struct edd_device *edev, int i) +{ + int error; + + if (!edev) + return 1; + memset(edev, 0, sizeof (*edev)); + INIT_LIST_HEAD(&edev->attr_list); + edd_dev_set_info(edev, &edd[i]); + snprintf(edev->name, EDD_DEVICE_NAME_SIZE, "int13_dev%02x", + edd[i].device); + error = edd_make_dir(edev); + return error; +} + +/** + * edd_init() - creates /proc tree of EDD data + * + * This assumes that eddnr and edd were + * assigned in setup.c already. + */ +static int __init +edd_init(void) +{ + unsigned int i; + int rc = 0; + struct edd_device *edev; + + printk(KERN_INFO "BIOS EDD facility v%s, %d devices found\n", + EDD_VERSION, eddnr); + + if (!eddnr) { + printk(KERN_INFO "EDD information not available.\n"); + return 1; + } + + bios_dir = proc_mkdir("bios", NULL); + if (bios_dir == NULL) + return 1; + + for (i = 0; i < eddnr && i < EDDMAXNR && !rc; i++) { + edev = kmalloc(sizeof (*edev), GFP_KERNEL); + if (!edev) { + rc = 1; + break; + } + + rc = edd_device_register(edev, i); + if (rc) { + break; + } + edd_devices[i] = edev; + } + + if (rc) { + for (i = 0; i < eddnr && i < EDDMAXNR; i++) { + if ((edev = edd_devices[i])) { + edd_device_unregister(edev); + kfree(edev); + } + } + + remove_proc_entry(bios_dir->name, NULL); + } + + return rc; +} + +static void __exit +edd_exit(void) +{ + int i; + struct edd_device *edev; + + for (i = 0; i < eddnr && i < EDDMAXNR; i++) { + if ((edev = edd_devices[i])) { + edd_device_unregister(edev); + kfree(edev); + } + } + + remove_proc_entry(bios_dir->name, NULL); +} + +module_init(edd_init); +module_exit(edd_exit); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/elanfreq.c linux.21pre4-ac2/arch/i386/kernel/elanfreq.c --- linux.21pre4/arch/i386/kernel/elanfreq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/elanfreq.c 2003-01-10 16:27:32.000000000 +0000 @@ -0,0 +1,297 @@ +/* + * elanfreq: cpufreq driver for the AMD ELAN family + * + * (c) Copyright 2002 Robert Schwebel + * + * Parts of this code are (c) Sven Geggus + * + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
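Once edd_init() has built the tree, each probed BIOS device appears as /proc/bios/int13_devXX with one file per attribute registered above. A small user-space sketch that dumps them for device 0x80 (paths and file names as created by this driver; error handling trimmed):

	#include <stdio.h>

	int main(void)
	{
		static const char *attrs[] = {
			"version", "extensions", "info_flags", "sectors",
			"default_cylinders", "default_heads",
			"default_sectors_per_track", "interface", "host_bus",
			"raw_data",
		};
		char path[64], line[256];
		unsigned int i;

		for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			FILE *f;

			snprintf(path, sizeof(path),
				 "/proc/bios/int13_dev80/%s", attrs[i]);
			f = fopen(path, "r");
			if (!f)
				continue;	/* attribute not present for this device */
			printf("== %s ==\n", attrs[i]);
			while (fgets(line, sizeof(line), f))
				fputs(line, stdout);
			fclose(f);
		}
		return 0;
	}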
+ * + * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel + * + */ + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */ +#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */ + +static struct cpufreq_driver *elanfreq_driver; + +/* Module parameter */ +static int max_freq; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Schwebel , Sven Geggus "); +MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); + +struct s_elan_multiplier { + int clock; /* frequency in kHz */ + int val40h; /* PMU Force Mode register */ + int val80h; /* CPU Clock Speed Register */ +}; + +/* + * It is important that the frequencies + * are listed in ascending order here! + */ +struct s_elan_multiplier elan_multiplier[] = { + {1000, 0x02, 0x18}, + {2000, 0x02, 0x10}, + {4000, 0x02, 0x08}, + {8000, 0x00, 0x00}, + {16000, 0x00, 0x02}, + {33000, 0x00, 0x04}, + {66000, 0x01, 0x04}, + {99000, 0x01, 0x05} +}; + +static struct cpufreq_frequency_table elanfreq_table[] = { + {0, 1000}, + {1, 2000}, + {2, 4000}, + {3, 8000}, + {4, 16000}, + {5, 33000}, + {6, 66000}, + {7, 99000}, + {0, CPUFREQ_TABLE_END}, +}; + + +/** + * elanfreq_get_cpu_frequency: determine current cpu speed + * + * Finds out at which frequency the CPU of the Elan SOC runs + * at the moment. Frequencies from 1 to 33 MHz are generated + * the normal way, 66 and 99 MHz are called "Hyperspeed Mode" + * and have the rest of the chip running with 33 MHz. + */ + +static unsigned int elanfreq_get_cpu_frequency(void) +{ + u8 clockspeed_reg; /* Clock Speed Register */ + + local_irq_disable(); + outb_p(0x80,REG_CSCIR); + clockspeed_reg = inb_p(REG_CSCDR); + local_irq_enable(); + + if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; } + + /* Are we in CPU clock multiplied mode (66/99 MHz)? */ + if ((clockspeed_reg & 0xE0) == 0xC0) { + if ((clockspeed_reg & 0x01) == 0) { + return 66000; + } else { + return 99000; + } + } + + /* 33 MHz is not 32 MHz... */ + if ((clockspeed_reg & 0xE0)==0xA0) + return 33000; + + return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000); +} + + +/** + * elanfreq_set_cpu_frequency: Change the CPU core frequency + * @cpu: cpu number + * @freq: frequency in kHz + * + * This function takes a frequency value and changes the CPU frequency + * according to this. Note that the frequency has to be checked by + * elanfreq_validatespeed() for correctness! + * + * There is no return value. + */ + +static void elanfreq_set_cpu_state (unsigned int state) { + + struct cpufreq_freqs freqs; + + if (!elanfreq_driver) { + printk(KERN_ERR "cpufreq: initialization problem or invalid target frequency\n"); + return; + } + + freqs.old = elanfreq_get_cpu_frequency(); + freqs.new = elan_multiplier[state].clock; + freqs.cpu = 0; /* elanfreq.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock); + + + /* + * Access to the Elan's internal registers is indexed via + * 0x22: Chip Setup & Control Register Index Register (CSCI) + * 0x23: Chip Setup & Control Register Data Register (CSCD) + * + */ + + /* + * 0x40 is the Power Management Unit's Force Mode Register. 
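All Elan register programming in this driver goes through the same two-port indexed scheme: write the register index to REG_CSCIR (0x22), then read or write the value through REG_CSCDR (0x23), with interrupts disabled around the pair. A small helper sketch (hypothetical names, not part of the driver):

	/* Hypothetical helpers illustrating the CSCIR/CSCDR access pattern used here. */
	static inline u8 elan_read_reg_sketch(u8 index)
	{
		u8 val;

		local_irq_disable();
		outb_p(index, REG_CSCIR);	/* select the internal register */
		val = inb_p(REG_CSCDR);		/* read its current value       */
		local_irq_enable();
		return val;
	}

	static inline void elan_write_reg_sketch(u8 index, u8 val)
	{
		local_irq_disable();
		outb_p(index, REG_CSCIR);	/* select the internal register */
		outb_p(val, REG_CSCDR);		/* program the new value        */
		local_irq_enable();
	}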
+ * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency) + */ + + local_irq_disable(); + outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */ + outb_p(0x00,REG_CSCDR); + local_irq_enable(); /* wait till internal pipelines and */ + udelay(1000); /* buffers have cleaned up */ + + local_irq_disable(); + + /* now, set the CPU clock speed register (0x80) */ + outb_p(0x80,REG_CSCIR); + outb_p(elan_multiplier[state].val80h,REG_CSCDR); + + /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */ + outb_p(0x40,REG_CSCIR); + outb_p(elan_multiplier[state].val40h,REG_CSCDR); + udelay(10000); + local_irq_enable(); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +}; + + +/** + * elanfreq_validatespeed: test if frequency range is valid + * + * This function checks if a given frequency range in kHz is valid + * for the hardware supported by the driver. + */ + +static int elanfreq_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); +} + +static int elanfreq_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_setpolicy(policy, &elanfreq_table[0], &newstate)) + return -EINVAL; + + elanfreq_set_cpu_state(newstate); + + return 0; +} + + +/* + * Module init and exit code + */ + +#ifndef MODULE +/** + * elanfreq_setup - elanfreq command line parameter parsing + * + * elanfreq command line parameter. Use: + * elanfreq=66000 + * to set the maximum CPU frequency to 66 MHz. Note that in + * case you do not give this boot parameter, the maximum + * frequency will fall back to _current_ CPU frequency which + * might be lower. If you build this as a module, use the + * max_freq module parameter instead. + */ +static int __init elanfreq_setup(char *str) +{ + max_freq = simple_strtoul(str, &str, 0); + return 1; +} +__setup("elanfreq=", elanfreq_setup); +#endif + +static int __init elanfreq_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + struct cpufreq_driver *driver; + int ret, i; + + /* Test if we have the right hardware */ + if ((c->x86_vendor != X86_VENDOR_AMD) || + (c->x86 != 4) || (c->x86_model!=10)) + { + printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); + return -ENODEV; + } + + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + if (!max_freq) + max_freq = elanfreq_get_cpu_frequency(); + + /* table init */ + for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if (elanfreq_table[i].frequency > max_freq) + elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + } + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = elanfreq_get_cpu_frequency(); +#endif + + driver->verify = &elanfreq_verify; + driver->setpolicy = &elanfreq_setpolicy; + + driver->policy[0].cpu = 0; + ret = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &elanfreq_table[0]); + if (ret) { + kfree(driver); + return ret; + } + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + elanfreq_driver = driver; + + ret = cpufreq_register(driver); + if (ret) { + elanfreq_driver = NULL; + kfree(driver); + } + + return ret; +} + + +static void __exit elanfreq_exit(void) +{ + if (elanfreq_driver) { + cpufreq_unregister(); + kfree(elanfreq_driver); + } +} + +module_init(elanfreq_init); +module_exit(elanfreq_exit); + +MODULE_PARM (max_freq, "i"); + diff -u 
--exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/entry.S linux.21pre4-ac2/arch/i386/kernel/entry.S --- linux.21pre4/arch/i386/kernel/entry.S 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/entry.S 2003-01-30 16:46:44.000000000 +0000 @@ -79,7 +79,7 @@ exec_domain = 16 need_resched = 20 tsk_ptrace = 24 -processor = 52 +cpu = 32 ENOSYS = 38 @@ -184,9 +184,11 @@ ENTRY(ret_from_fork) +#if CONFIG_SMP pushl %ebx call SYMBOL_NAME(schedule_tail) addl $4, %esp +#endif GET_CURRENT(%ebx) testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS jne tracesys_exit @@ -643,10 +645,10 @@ .long SYMBOL_NAME(sys_lremovexattr) .long SYMBOL_NAME(sys_fremovexattr) .long SYMBOL_NAME(sys_tkill) - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sendfile64 */ + .long SYMBOL_NAME(sys_sendfile64) .long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */ + .long SYMBOL_NAME(sys_sched_setaffinity) + .long SYMBOL_NAME(sys_sched_getaffinity) .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_thread_area */ .long SYMBOL_NAME(sys_ni_syscall) /* sys_get_thread_area */ .long SYMBOL_NAME(sys_ni_syscall) /* 245 sys_io_setup */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/gx-suspmod.c linux.21pre4-ac2/arch/i386/kernel/gx-suspmod.c --- linux.21pre4/arch/i386/kernel/gx-suspmod.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/gx-suspmod.c 2003-01-28 16:23:47.000000000 +0000 @@ -0,0 +1,514 @@ +/* + * Cyrix MediaGX and NatSemi Geode Suspend Modulation + * (C) 2002 Zwane Mwaikambo + * (C) 2002 Hiroshi Miura + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. + * + * Theoritical note: + * + * (see Geode(tm) CS5530 manual (rev.4.1) page.56) + * + * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 + * are based on Suspend Moduration. + * + * Suspend Modulation works by asserting and de-asserting the SUSP# pin + * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# + * the CPU enters an idle state. GX1 stops its core clock when SUSP# is + * asserted then power consumption is reduced. + * + * Suspend Modulation's OFF/ON duration are configurable + * with 'Suspend Modulation OFF Count Register' + * and 'Suspend Modulation ON Count Register'. + * These registers are 8bit counters that represent the number of + * 32us intervals which the SUSP# pin is asserted/de-asserted to the + * processor. + * + * These counters define a ratio which is the effective frequency + * of operation of the system. + * + * On Count + * F_eff = Fgx * ---------------------- + * On Count + Off Count + * + * 0 <= On Count, Off Count <= 255 + * + * From these limits, we can get register values + * + * on_duration + off_duration <= MAX_DURATION + * off_duration = on_duration * (stock_freq - freq) / freq + * + * on_duration = (freq * DURATION) / stock_freq + * off_duration = DURATION - on_duration + * + * + *--------------------------------------------------------------------------- + * + * ChangeLog: + * Dec. 
11, 2002 Hiroshi Miura + * - rewrite for Cyrix MediaGX Cx5510/5520 and + * NatSemi Geode Cs5530(A). + * + * Jul. ??, 2002 Zwane Mwaikambo + * - cs5530_mod patch for 2.4.19-rc1. + * + *--------------------------------------------------------------------------- + * + * Todo + * Test on machines with 5510, 5530, 5530A + */ + +/************************************************************************ + * Suspend Modulation - Definitions * + ************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PCI config registers, all at F0 */ +#define PCI_PMER1 0x80 /* power management enable register 1 */ +#define PCI_PMER2 0x81 /* power management enable register 2 */ +#define PCI_PMER3 0x82 /* power management enable register 3 */ +#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ +#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ +#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ +#define PCI_MODON 0x95 /* suspend modulation ON counter register */ +#define PCI_SUSCFG 0x96 /* suspend configuration register */ + +/* PMER1 bits */ +#define GPM (1<<0) /* global power management */ +#define GIT (1<<1) /* globally enable PM device idle timers */ +#define GTR (1<<2) /* globally enable IO traps */ +#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ +#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ + +/* SUSCFG bits */ +#define SUSMOD (1<<0) /* enable/disable suspend modulation */ +/* the belows support only with cs5530 (after rev.1.2)/cs5530A */ +#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ + /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ +#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ +/* the belows support only with cs5530A */ +#define PWRSVE_ISA (1<<3) /* stop ISA clock */ +#define PWRSVE (1<<4) /* active idle */ + +struct gxfreq_params { + u8 on_duration; + u8 off_duration; + u8 pci_suscfg; + u8 pci_pmer1; + u8 pci_pmer2; + u8 pci_rev; + struct pci_dev *cs55x0; +}; + +static struct cpufreq_driver *gx_driver; +static struct gxfreq_params *gx_params; +static int stock_freq; + +/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ +static int pci_busclk = 0; +MODULE_PARM(pci_busclk, "i"); + +/* maximum duration for which the cpu may be suspended + * (32us * MAX_DURATION). If no parameter is given, this defaults + * to 255. + * Note that this leads to a maximum of 8 ms(!) where the CPU clock + * is suspended -- processing power is just 0.39% of what it used to be, + * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ +static int max_duration = 255; +MODULE_PARM(max_duration, "i"); + +/* For the default policy, we want at least some processing power + * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) + */ +#define POLICY_MIN_DIV 20 + + +/* DEBUG + * Define it if you want verbose debug output + */ + +#define SUSPMOD_DEBUG 1 + +#ifdef SUSPMOD_DEBUG +#define dprintk(msg...) printk(KERN_DEBUG "cpufreq:" msg) +#else +#define dprintk(msg...) 
do { } while(0); +#endif + +/** + * we can detect a core multipiler from dir0_lsb + * from GX1 datasheet p.56, + * MULT[3:0]: + * 0000 = SYSCLK multiplied by 4 (test only) + * 0001 = SYSCLK multiplied by 10 + * 0010 = SYSCLK multiplied by 4 + * 0011 = SYSCLK multiplied by 6 + * 0100 = SYSCLK multiplied by 9 + * 0101 = SYSCLK multiplied by 5 + * 0110 = SYSCLK multiplied by 7 + * 0111 = SYSCLK multiplied by 8 + * of 33.3MHz + **/ +static int gx_freq_mult[16] = { + 4, 10, 4, 6, 9, 5, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + + +/**************************************************************** + * Low Level chipset interface * + ****************************************************************/ +static struct pci_device_id gx_chipset_tbl[] __initdata = { + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, + { 0, }, +}; + +/** + * gx_detect_chipset: + * + **/ +static __init struct pci_dev *gx_detect_chipset(void) +{ + struct pci_dev *gx_pci; + + /* check if CPU is a MediaGX or a Geode. */ + if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) && + (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { + printk(KERN_INFO "gx-suspmod: error: no MediaGX/Geode processor found!\n"); + return NULL; + } + + /* detect which companion chip is used */ + pci_for_each_dev(gx_pci) { + if ((pci_match_device (gx_chipset_tbl, gx_pci)) != NULL) { + return gx_pci; + } + } + + dprintk(KERN_INFO "gx-suspmod: error: no supported chipset found!\n"); + return NULL; +} + +/** + * gx_get_cpuspeed: + * + * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs. + */ +static int gx_get_cpuspeed(void) +{ + if ((gx_params->pci_suscfg & SUSMOD) == 0) + return stock_freq; + + return (stock_freq * gx_params->on_duration) + / (gx_params->on_duration + gx_params->off_duration); +} + +/** + * gx_validate_speed: + * determine current cpu speed + * +**/ + +static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) +{ + unsigned int i; + u8 tmp_on, tmp_off; + int old_tmp_freq = stock_freq; + int tmp_freq; + + *on_duration=1; + *off_duration=0; + + for (i=max_duration; i>0; i--) { + tmp_on = ((khz * i) / stock_freq) & 0xff; + tmp_off = i - tmp_on; + tmp_freq = (stock_freq * tmp_on) / i; + /* if this relation is closer to khz, use this. If it's equal, + * prefer it, too - lower latency */ + if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) { + *on_duration = tmp_on; + *off_duration = tmp_off; + old_tmp_freq = tmp_freq; + } + } + + return old_tmp_freq; +} + + +/** + * gx_set_cpuspeed: + * set cpu speed in khz. 
+ **/ + +static void gx_set_cpuspeed(unsigned int khz) +{ + u8 suscfg, pmer1; + unsigned int new_khz; + unsigned long flags; + struct cpufreq_freqs freqs; + + + freqs.cpu = 0; + freqs.old = gx_get_cpuspeed(); + + new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); + + freqs.new = new_khz; + + if (new_khz == stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ + local_irq_save(flags); + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, (gx_params->pci_suscfg & ~(SUSMOD))); + pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &(gx_params->pci_suscfg)); + local_irq_restore(flags); + dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n"); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + return; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + local_irq_save(flags); + switch (gx_params->cs55x0->device) { + case PCI_DEVICE_ID_CYRIX_5530_LEGACY: + pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; + /* FIXME: need to test other values -- Zwane,Miura */ + pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ + pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ + pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); + + if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */ + suscfg = gx_params->pci_suscfg | SUSMOD; + } else { /* CS5530A,B.. */ + suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; + } + break; + case PCI_DEVICE_ID_CYRIX_5520: + case PCI_DEVICE_ID_CYRIX_5510: + suscfg = gx_params->pci_suscfg | SUSMOD; + break; + default: + local_irq_restore(flags); + dprintk("fatal: try to set unknown chipset.\n"); + return; + } + + pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); + pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); + + pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); + pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); + + local_irq_restore(flags); + + gx_params->pci_suscfg = suscfg; + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", + gx_params->on_duration * 32, gx_params->off_duration * 32); + dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); +} + +/**************************************************************** + * High level functions * + ****************************************************************/ + +/* + * cpufreq_gx_verify: test if frequency range is valid + * + * This function checks if a given frequency range in kHz is valid + * for the hardware supported by the driver. + */ + +static int cpufreq_gx_verify(struct cpufreq_policy *policy) +{ + unsigned int tmp_freq = 0; + u8 tmp1, tmp2; + + if (!gx_driver || !stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + + /* it needs to be assured that at least one supported frequency is + * within policy->min and policy->max. If it is not, policy->max + * needs to be increased until one freuqency is supported. + * policy->min may not be decreased, though. This way we guarantee a + * specific processing capacity. 
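Concretely, with the formula from the header comment (F_eff = Fgx * on / (on + off)): a 200000 kHz part asked to run at 50000 kHz ends up with on_duration = 1 and off_duration = 3, one 32 us slice running for every three suspended, and gx_get_cpuspeed() reports 200000 * 1 / 4 = 50000 kHz. A standalone sketch of that arithmetic, using the same search and tie-break as gx_validate_speed() (values are illustrative only):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const int stock_freq = 200000, max_duration = 255, khz = 50000;
		int i, on, off, freq, best_on = 1, best_off = 0, best = stock_freq;

		for (i = max_duration; i > 0; i--) {
			on   = ((khz * i) / stock_freq) & 0xff;
			off  = i - on;
			freq = (stock_freq * on) / i;
			/* same tie-break as gx_validate_speed(): shorter period wins */
			if (abs(freq - khz) <= abs(best - khz)) {
				best_on = on; best_off = off; best = freq;
			}
		}
		/* prints on=1 off=3: F_eff = 200000 * 1 / (1 + 3) = 50000 kHz */
		printf("on=%d off=%d F_eff=%d kHz\n", best_on, best_off, best);
		return 0;
	}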
+ */ + tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); + if (tmp_freq < policy->min) + tmp_freq += stock_freq / max_duration; + policy->min = tmp_freq; + if (policy->min > policy->max) + policy->max = tmp_freq; + tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); + if (tmp_freq > policy->max) + tmp_freq -= stock_freq / max_duration; + policy->max = tmp_freq; + if (policy->max < policy->min) + policy->max = policy->min; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); + + return 0; +} + +/* + * cpufreq_gx_setpolicy: + * + */ +static int cpufreq_gx_setpolicy(struct cpufreq_policy *policy) +{ + + if (!gx_driver || !stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + + if (policy->policy == CPUFREQ_POLICY_POWERSAVE) { + /* here we need to make sure that we don't set the + * frequency below policy->min (see comment in + * cpufreq_gx_verify() - guarantee of processing + * capacity. + */ + u8 tmp1, tmp2; + unsigned int tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); + while (tmp_freq < policy->min) { + tmp_freq += stock_freq / max_duration; + tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); + } + gx_set_cpuspeed(tmp_freq); + } else { + gx_set_cpuspeed(policy->max); + } + return 0; +} + +/* + * cpufreq_gx_init: + * MediaGX/Geode GX initilize cpufreq driver + */ + +static int __init cpufreq_gx_init(void) +{ + int maxfreq,ret,curfreq; + struct cpufreq_driver *driver; + struct gxfreq_params *params; + struct pci_dev *gx_pci; + u32 class_rev; + + /* Test if we have the right hardware */ + if ((gx_pci = gx_detect_chipset()) == NULL) + return -ENODEV; + + /* check whether module parameters are sane */ + if (max_duration > 0xff) + max_duration = 0xff; + + dprintk("geode suspend modulation available.\n"); + + driver = kmalloc(sizeof(struct cpufreq_driver) + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (driver == NULL) + return -ENOMEM; + params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL); + if (params == NULL) { + kfree(driver); + return -ENOMEM; + } + + driver->policy = (struct cpufreq_policy *)(driver + 1); + params->cs55x0 = gx_pci; + + /* keep cs55x0 configurations */ + pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg)); + pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); + pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); + pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); + pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); + pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev); + params->pci_rev = class_rev && 0xff; + + gx_params = params; + + /* determine maximum frequency */ + if (pci_busclk) { + maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + } else if (cpu_khz) { + maxfreq = cpu_khz; + } else { + maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + } + stock_freq = maxfreq; + curfreq = gx_get_cpuspeed(); + + dprintk("cpu max frequency is %d.\n", maxfreq); + dprintk("cpu current frequency is %dkHz.\n",curfreq); + + /* setup basic struct for cpufreq API */ +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = curfreq; +#endif + driver->policy[0].cpu = 0; + + if (max_duration < POLICY_MIN_DIV) + driver->policy[0].min = maxfreq / max_duration; + else + driver->policy[0].min = maxfreq / POLICY_MIN_DIV; + driver->policy[0].max = maxfreq; + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[0].cpuinfo.min_freq = maxfreq / 
max_duration; + driver->policy[0].cpuinfo.max_freq = maxfreq; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + driver->verify = &cpufreq_gx_verify; + driver->setpolicy = &cpufreq_gx_setpolicy; + + gx_driver = driver; + + if ((ret = cpufreq_register(driver))) { + kfree(driver); + kfree(params); + return ret; /* register error! */ + } + + return 0; +} + +static void __exit cpufreq_gx_exit(void) +{ + if (gx_driver) { + /* disable throttling */ + gx_set_cpuspeed(stock_freq); + cpufreq_unregister(); + kfree(gx_driver); + kfree(gx_params); + } +} + +MODULE_AUTHOR ("Hiroshi Miura "); +MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_gx_init); +module_exit(cpufreq_gx_exit); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/head.S linux.21pre4-ac2/arch/i386/kernel/head.S --- linux.21pre4/arch/i386/kernel/head.S 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/head.S 2003-01-06 15:43:53.000000000 +0000 @@ -445,4 +445,15 @@ .quad 0x00409a0000000000 /* 0x48 APM CS code */ .quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */ .quad 0x0040920000000000 /* 0x58 APM DS data */ + /* Segments used for calling PnP BIOS */ + .quad 0x00c09a0000000000 /* 0x60 32-bit code */ + .quad 0x00809a0000000000 /* 0x68 16-bit code */ + .quad 0x0080920000000000 /* 0x70 16-bit data */ + .quad 0x0080920000000000 /* 0x78 16-bit data */ + .quad 0x0080920000000000 /* 0x80 16-bit data */ + .quad 0x0000000000000000 /* 0x88 not used */ + .quad 0x0000000000000000 /* 0x90 not used */ + .quad 0x0000000000000000 /* 0x98 not used */ + /* Per CPU segments */ .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */ + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/i386_ksyms.c linux.21pre4-ac2/arch/i386/kernel/i386_ksyms.c --- linux.21pre4/arch/i386/kernel/i386_ksyms.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/i386_ksyms.c 2003-01-12 01:03:46.000000000 +0000 @@ -28,6 +28,7 @@ #include #include #include +#include extern void dump_thread(struct pt_regs *, struct user *); extern spinlock_t rtc_lock; @@ -49,6 +50,7 @@ EXPORT_SYMBOL(drive_info); #endif +extern unsigned long cpu_khz; extern unsigned long get_cmos_time(void); /* platform dependent support */ @@ -71,6 +73,7 @@ EXPORT_SYMBOL(pm_idle); EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(get_cmos_time); +EXPORT_SYMBOL(cpu_khz); EXPORT_SYMBOL(apm_info); EXPORT_SYMBOL(gdt); EXPORT_SYMBOL(empty_zero_page); @@ -86,6 +89,7 @@ /* Networking helper routines. 
*/ EXPORT_SYMBOL(csum_partial_copy_generic); /* Delay loops */ +EXPORT_SYMBOL(__ndelay); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(__const_udelay); @@ -129,6 +133,7 @@ EXPORT_SYMBOL(cpu_data); EXPORT_SYMBOL(kernel_flag_cacheline); EXPORT_SYMBOL(smp_num_cpus); +EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(cpu_online_map); EXPORT_SYMBOL_NOVERS(__write_lock_failed); EXPORT_SYMBOL_NOVERS(__read_lock_failed); @@ -179,3 +184,8 @@ #ifdef CONFIG_MULTIQUAD EXPORT_SYMBOL(xquad_portio); #endif + +#ifdef CONFIG_EDD_MODULE +EXPORT_SYMBOL(edd); +EXPORT_SYMBOL(eddnr); +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/i387.c linux.21pre4-ac2/arch/i386/kernel/i387.c --- linux.21pre4/arch/i386/kernel/i387.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/i387.c 2003-01-06 15:38:26.000000000 +0000 @@ -248,7 +248,7 @@ * FXSR floating point environment conversions. */ -static inline int convert_fxsr_to_user( struct _fpstate *buf, +static int convert_fxsr_to_user( struct _fpstate *buf, struct i387_fxsave_struct *fxsave ) { unsigned long env[7]; @@ -270,13 +270,18 @@ to = &buf->_st[0]; from = (struct _fpxreg *) &fxsave->st_space[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { - if ( __copy_to_user( to, from, sizeof(*to) ) ) + unsigned long *t = (unsigned long *)to; + unsigned long *f = (unsigned long *)from; + + if (__put_user(*f, t) || + __put_user(*(f + 1), t + 1) || + __put_user(from->exponent, &to->exponent)) return 1; } return 0; } -static inline int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, +static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave, struct _fpstate *buf ) { unsigned long env[7]; @@ -299,7 +304,12 @@ to = (struct _fpxreg *) &fxsave->st_space[0]; from = &buf->_st[0]; for ( i = 0 ; i < 8 ; i++, to++, from++ ) { - if ( __copy_from_user( to, from, sizeof(*from) ) ) + unsigned long *t = (unsigned long *)to; + unsigned long *f = (unsigned long *)from; + + if (__get_user(*f, t) || + __get_user(*(f + 1), t + 1) || + __get_user(from->exponent, &to->exponent)) return 1; } return 0; @@ -321,7 +331,7 @@ return 1; } -static inline int save_i387_fxsave( struct _fpstate *buf ) +static int save_i387_fxsave( struct _fpstate *buf ) { struct task_struct *tsk = current; int err = 0; @@ -371,7 +381,7 @@ sizeof(struct i387_fsave_struct) ); } -static inline int restore_i387_fxsave( struct _fpstate *buf ) +static int restore_i387_fxsave( struct _fpstate *buf ) { struct task_struct *tsk = current; clear_fpu( tsk ); @@ -389,7 +399,7 @@ if ( HAVE_HWFP ) { if ( cpu_has_fxsr ) { - err = restore_i387_fxsave( buf ); + err = restore_i387_fxsave( buf ); } else { err = restore_i387_fsave( buf ); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/io_apic.c linux.21pre4-ac2/arch/i386/kernel/io_apic.c --- linux.21pre4/arch/i386/kernel/io_apic.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/io_apic.c 2003-01-31 11:03:59.000000000 +0000 @@ -188,6 +188,86 @@ clear_IO_APIC_pin(apic, pin); } +static void set_ioapic_affinity (unsigned int irq, unsigned long mask) +{ + unsigned long flags; + + /* + * Only the first 8 bits are valid. 
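The shift that follows packs the CPU mask into the destination field of the redirection entry: bits 24-31 of the high dword select the target APIC(s), which is why only eight bits of the affinity mask are usable here. Steering an IRQ to logical CPU 2, for example, comes down to this (illustration only):

	/*
	 * Illustration: the value set_ioapic_affinity() programs into the
	 * high dword of the redirection entry for a given logical CPU.
	 */
	static inline unsigned long ioapic_dest_sketch(int cpu)
	{
		unsigned long mask = 1UL << cpu;	/* affinity bit, e.g. cpu = 2 */
		return mask << 24;			/* bits 24-31: destination    */
	}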
+ */ + mask = mask << 24; + spin_lock_irqsave(&ioapic_lock, flags); + __DO_ACTION(1, = mask, ) + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +#if CONFIG_SMP + +typedef struct { + unsigned int cpu; + unsigned long timestamp; +} ____cacheline_aligned irq_balance_t; + +static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned + = { [ 0 ... NR_IRQS-1 ] = { 0, 0 } }; + +extern unsigned long irq_affinity [NR_IRQS]; + +#endif + +#define IDLE_ENOUGH(cpu,now) \ + (idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1)) + +#define IRQ_ALLOWED(cpu,allowed_mask) \ + ((1 << cpu) & (allowed_mask)) + +static unsigned long move(int curr_cpu, unsigned long allowed_mask, unsigned long now, int direction) +{ + int search_idle = 1; + int cpu = curr_cpu; + + goto inside; + + do { + if (unlikely(cpu == curr_cpu)) + search_idle = 0; +inside: + if (direction == 1) { + cpu++; + if (cpu >= smp_num_cpus) + cpu = 0; + } else { + cpu--; + if (cpu == -1) + cpu = smp_num_cpus-1; + } + } while (!IRQ_ALLOWED(cpu,allowed_mask) || + (search_idle && !IDLE_ENOUGH(cpu,now))); + + return cpu; +} + +static inline void balance_irq(int irq) +{ +#if CONFIG_SMP + irq_balance_t *entry = irq_balance + irq; + unsigned long now = jiffies; + + if (unlikely(entry->timestamp != now)) { + unsigned long allowed_mask; + int random_number; + + rdtscl(random_number); + random_number &= 1; + + allowed_mask = cpu_online_map & irq_affinity[irq]; + entry->timestamp = now; + entry->cpu = move(entry->cpu, allowed_mask, now, random_number); + set_ioapic_affinity(irq, apicid_to_phys_cpu_present(entry->cpu)); + } +#endif +} + /* * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to * specific CPU-side IRQs. @@ -795,6 +875,7 @@ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.version); if ( (reg_01.version != 0x01) && /* 82489DX IO-APICs */ (reg_01.version != 0x02) && /* VIA */ + (reg_01.version != 0x03) && /* later VIA */ (reg_01.version != 0x10) && /* oldest IO-APICs */ (reg_01.version != 0x11) && /* Pentium/Pro IO-APICs */ (reg_01.version != 0x13) && /* Xeon IO-APICs */ @@ -1218,6 +1299,7 @@ */ static void ack_edge_ioapic_irq(unsigned int irq) { + balance_irq(irq); if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_DISABLED)) mask_IO_APIC_irq(irq); @@ -1257,6 +1339,7 @@ unsigned long v; int i; + balance_irq(irq); /* * It appears there is an erratum which affects at least version 0x11 * of I/O APIC (that's the 82093AA and cores integrated into various @@ -1313,19 +1396,6 @@ static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ } -static void set_ioapic_affinity (unsigned int irq, unsigned long mask) -{ - unsigned long flags; - /* - * Only the first 8 bits are valid. - */ - mask = mask << 24; - - spin_lock_irqsave(&ioapic_lock, flags); - __DO_ACTION(1, = mask, ) - spin_unlock_irqrestore(&ioapic_lock, flags); -} - /* * Level and edge triggered IO-APIC interrupts need different handling, * so we use two separate IRQ descriptors. Edge triggered IRQs can be diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/irq.c linux.21pre4-ac2/arch/i386/kernel/irq.c --- linux.21pre4/arch/i386/kernel/irq.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/irq.c 2003-01-06 21:42:11.000000000 +0000 @@ -1090,7 +1090,7 @@ static struct proc_dir_entry * smp_affinity_entry [NR_IRQS]; -static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL }; +unsigned long irq_affinity [NR_IRQS] = { [0 ... 
NR_IRQS-1] = ~0UL }; static int irq_affinity_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/ldt.c linux.21pre4-ac2/arch/i386/kernel/ldt.c --- linux.21pre4/arch/i386/kernel/ldt.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/ldt.c 2003-01-06 19:10:59.000000000 +0000 @@ -12,37 +12,139 @@ #include #include #include +#include #include #include #include #include +#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */ +static void flush_ldt(void *mm) +{ + if (current->active_mm) + load_LDT(¤t->active_mm->context); +} +#endif + +static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +{ + void *oldldt; + void *newldt; + int oldsize; + + if (mincount <= pc->size) + return 0; + oldsize = pc->size; + mincount = (mincount+511)&(~511); + if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE) + newldt = vmalloc(mincount*LDT_ENTRY_SIZE); + else + newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL); + + if (!newldt) + return -ENOMEM; + + if (oldsize) + memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE); + + oldldt = pc->ldt; + memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE); + wmb(); + pc->ldt = newldt; + pc->size = mincount; + if (reload) { + load_LDT(pc); +#ifdef CONFIG_SMP + if (current->mm->cpu_vm_mask != (1< PAGE_SIZE) + vfree(oldldt); + else + kfree(oldldt); + } + return 0; +} + +static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +{ + int err = alloc_ldt(new, old->size, 0); + if (err < 0) { + printk(KERN_WARNING "ldt allocation failed\n"); + new->size = 0; + return err; + } + memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE); + return 0; +} + +/* + * we do not have to muck with descriptors here, that is + * done in switch_mm() as needed. + */ +int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + struct mm_struct * old_mm; + int retval = 0; + + init_MUTEX(&mm->context.sem); + mm->context.size = 0; + old_mm = current->mm; + if (old_mm && old_mm->context.size > 0) { + down(&old_mm->context.sem); + retval = copy_ldt(&mm->context, &old_mm->context); + up(&old_mm->context.sem); + } + return retval; +} + /* - * read_ldt() is not really atomic - this is not a problem since - * synchronization of reads and writes done to the LDT has to be - * assured by user-space anyway. Writes are atomic, to protect - * the security checks done on new descriptors. + * No need to lock the MM as we are the last user + * Do not touch the ldt register, we are already + * in the next thread. 
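
The reworked ldt.c above grows the per-mm LDT in 512-entry steps and switches from kmalloc() to vmalloc() once the table no longer fits in one page. A small sketch of just that sizing rule (LDT_ENTRY_SIZE is 8 bytes on i386; PAGE_SIZE is assumed to be 4096 here):

#include <stdio.h>

#define LDT_ENTRY_SIZE 8
#define PAGE_SIZE      4096

/* Round the requested entry count up to a multiple of 512, as
 * alloc_ldt() does with (mincount+511)&(~511), and report which
 * allocator the kernel side would pick for the resulting table. */
static void ldt_sizing(int mincount)
{
        int count = (mincount + 511) & ~511;
        long bytes = (long)count * LDT_ENTRY_SIZE;

        printf("%d entries -> %d entries, %ld bytes, %s\n",
               mincount, count, bytes,
               bytes > PAGE_SIZE ? "vmalloc" : "kmalloc");
}

int main(void)
{
        ldt_sizing(1);          /* 512 entries, 4096 bytes, kmalloc */
        ldt_sizing(513);        /* 1024 entries, 8192 bytes, vmalloc */
        return 0;
}
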
*/ +void destroy_context(struct mm_struct *mm) +{ + if (mm->context.size) { + if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(mm->context.ldt); + else + kfree(mm->context.ldt); + mm->context.size = 0; + } +} + static int read_ldt(void * ptr, unsigned long bytecount) { int err; unsigned long size; struct mm_struct * mm = current->mm; - err = 0; - if (!mm->context.segments) - goto out; + if (!mm->context.size) + return 0; + if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) + bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; - size = LDT_ENTRIES*LDT_ENTRY_SIZE; + down(&mm->context.sem); + size = mm->context.size*LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = size; - if (copy_to_user(ptr, mm->context.segments, size)) + err = 0; + if (copy_to_user(ptr, mm->context.ldt, size)) err = -EFAULT; -out: - return err; + up(&mm->context.sem); + if (err < 0) + return err; + if (size != bytecount) { + /* zero-fill the rest */ + clear_user(ptr+size, bytecount-size); + } + return bytecount; } static int read_default_ldt(void * ptr, unsigned long bytecount) @@ -53,7 +155,7 @@ err = 0; address = &default_ldt[0]; - size = sizeof(struct desc_struct); + size = 5*sizeof(struct desc_struct); if (size > bytecount) size = bytecount; @@ -88,24 +190,14 @@ goto out; } - /* - * the GDT index of the LDT is allocated dynamically, and is - * limited by MAX_LDT_DESCRIPTORS. - */ - down_write(&mm->mmap_sem); - if (!mm->context.segments) { - void * segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); - error = -ENOMEM; - if (!segments) + down(&mm->context.sem); + if (ldt_info.entry_number >= mm->context.size) { + error = alloc_ldt(¤t->mm->context, ldt_info.entry_number+1, 1); + if (error < 0) goto out_unlock; - memset(segments, 0, LDT_ENTRIES*LDT_ENTRY_SIZE); - wmb(); - mm->context.segments = segments; - mm->context.cpuvalid = 1UL << smp_processor_id(); - load_LDT(mm); } - lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.segments); + lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt); /* Allow LDTs to be cleared by the user. */ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { @@ -143,7 +235,7 @@ error = 0; out_unlock: - up_write(&mm->mmap_sem); + up(&mm->context.sem); out: return error; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/longhaul.c linux.21pre4-ac2/arch/i386/kernel/longhaul.c --- linux.21pre4/arch/i386/kernel/longhaul.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/longhaul.c 2003-01-10 16:25:37.000000000 +0000 @@ -0,0 +1,813 @@ +/* + * $Id: longhaul.c,v 1.83 2003/01/02 22:16:26 db Exp $ + * + * (C) 2001 Dave Jones. + * (C) 2002 Padraig Brady. + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon datasheets & sample CPUs kindly provided by VIA. + * + * VIA have currently 3 different versions of Longhaul. + * + * +---------------------+----------+---------------------------------+ + * | Marketing name | Codename | longhaul version / features. | + * +---------------------+----------+---------------------------------+ + * | Samuel/CyrixIII | C5A | v1 : multipliers only | + * | Samuel2/C3 | C3E/C5B | v1 : multiplier only | + * | Ezra | C5C | v2 : multipliers & voltage | + * | Ezra-T | C5M/C5N | v3 : multipliers, voltage & FSB | + * +---------------------+----------+---------------------------------+ + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DEBUG + +#ifdef DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0); +#endif + +static int numscales=16, numvscales; +static int minvid, maxvid; +static int can_scale_voltage; +static int can_scale_fsb; +static int vrmrev; + + +/* Module parameters */ +static int dont_scale_voltage; +static int dont_scale_fsb; +static int current_fsb; + +#define __hlt() __asm__ __volatile__("hlt": : :"memory") + +/* + * Clock ratio tables. + * The eblcr ones specify the ratio read from the CPU. + * The clock_ratio ones specify what to write to the CPU. + */ + +/* VIA C3 Samuel 1 & Samuel 2 (stepping 0)*/ +static int __initdata longhaul1_clock_ratio[16] = { + -1, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + -1, /* 0100 -> RESERVED */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + -1, /* 1111 -> RESERVED */ +}; + +static int __initdata samuel1_eblcr[16] = { + 50, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + -1, /* 0111 -> RESERVED */ + -1, /* 1000 -> RESERVED */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + -1, /* 1100 -> RESERVED */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + 65, /* 1111 -> 6.5x */ +}; + +/* VIA C3 Samuel2 Stepping 1->15 & VIA C3 Ezra */ +static int __initdata longhaul2_clock_ratio[16] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ +}; + +static int __initdata samuel2_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 110, /* 0111 -> 11.0x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 130, /* 1110 -> 13.0x */ + 65, /* 1111 -> 6.5x */ +}; + +static int __initdata ezra_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ +}; + +/* VIA C5M. 
*/ +static int __initdata longhaul3_clock_ratio[32] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ + + -1, /* 0000 -> RESERVED (10.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (9.0x)*/ + 105, /* 0100 -> 10.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 135, /* 0111 -> 13.5x */ + 140, /* 1000 -> 14.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 130, /* 1011 -> 13.0x */ + 145, /* 1100 -> 14.5x */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + -1, /* 1111 -> RESERVED (12.0x) */ +}; + +static int __initdata c5m_eblcr[32] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ + + -1, /* 0000 -> RESERVED (9.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (10.0x)*/ + 135, /* 0100 -> 13.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 105, /* 0111 -> 10.5x */ + 130, /* 1000 -> 13.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 140, /* 1011 -> 14.0x */ + -1, /* 1100 -> RESERVED (12.0x) */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + 145, /* 1111 -> 14.5x */ +}; + +/* fsb values as defined in CPU */ +static unsigned int eblcr_fsb_table[] = { 66, 133, 100, -1 }; +/* fsb values to favour low fsb speed (lower power) */ +static unsigned int power_fsb_table[] = { 66, 100, 133, -1 }; +/* fsb values to favour high fsb speed (for e.g. if lowering CPU + freq because of heat, but want to maintain highest performance possible) */ +static unsigned int perf_fsb_table[] = { 133, 100, 66, -1 }; +static unsigned int *fsb_search_table; + +/* Voltage scales. Div by 1000 to get actual voltage. */ +static int __initdata vrm85scales[32] = { + 1250, 1200, 1150, 1100, 1050, 1800, 1750, 1700, + 1650, 1600, 1550, 1500, 1450, 1400, 1350, 1300, + 1275, 1225, 1175, 1125, 1075, 1825, 1775, 1725, + 1675, 1625, 1575, 1525, 1475, 1425, 1375, 1325, +}; + +static int __initdata mobilevrmscales[32] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1500, 1350, 1300, -1, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1025, 1000, 975, 950, 925, -1, +}; + +/* Clock ratios multiplied by 10 */ +static int clock_ratio[32]; +static int eblcr_table[32]; +static int voltage_table[32]; +static int highest_speed, lowest_speed; /* kHz */ +static int longhaul; /* version. 
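
Because the tables above store multipliers scaled by ten, the driver computes speeds in kHz as ratio * fsb * 100 with the FSB in MHz; that product recurs throughout the verify and setpolicy code below. A worked example with invented bus speeds:

#include <stdio.h>

/* clock_ratio[] entries are the multiplier times 10 (55 means 5.5x),
 * so with the FSB in MHz:  kHz = ratio10 * fsb * 100
 * (5.5 * 100 MHz = 550 MHz = 550000 kHz = 55 * 100 * 100). */
static unsigned int longhaul_khz(unsigned int ratio10, unsigned int fsb_mhz)
{
        return ratio10 * fsb_mhz * 100;
}

int main(void)
{
        printf("5.5x @ 100 MHz -> %u kHz\n", longhaul_khz(55, 100));
        printf("8.0x @ 133 MHz -> %u kHz\n", longhaul_khz(80, 133));
        return 0;
}
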
*/ +static struct cpufreq_driver *longhaul_driver; + + +static int longhaul_get_cpu_fsb (void) +{ + unsigned long invalue=0,lo, hi; + + if (current_fsb == 0) { + rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<18|1<<19)) >>18; + return eblcr_fsb_table[invalue]; + } else { + return current_fsb; + } +} + + +static int longhaul_get_cpu_mult (void) +{ + unsigned long invalue=0,lo, hi; + + rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<22|1<<23|1<<24|1<<25)) >>22; + if (longhaul==3) { + if (lo & (1<<27)) + invalue+=16; + } + return eblcr_table[invalue]; +} + + +/** + * longhaul_set_cpu_frequency() + * @clock_ratio_index : index of clock_ratio[] for new frequency + * @newfsb: the new FSB + * + * Sets a new clock ratio, and -if applicable- a new Front Side Bus + */ + +static void longhaul_setstate (unsigned int clock_ratio_index, unsigned int newfsb) +{ + unsigned long lo, hi; + unsigned int bits; + int revkey; + int vidindex, i; + struct cpufreq_freqs freqs; + + if (!newfsb || (clock_ratio[clock_ratio_index] == -1)) + return; + + if ((!can_scale_fsb) && (newfsb != current_fsb)) + return; + + if (((clock_ratio[clock_ratio_index] * newfsb * 100) > highest_speed) || + ((clock_ratio[clock_ratio_index] * newfsb * 100) < lowest_speed)) + return; + + freqs.old = longhaul_get_cpu_mult() * longhaul_get_cpu_fsb() * 100; + freqs.new = clock_ratio[clock_ratio_index] * newfsb * 100; + freqs.cpu = 0; /* longhaul.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + dprintk (KERN_INFO "longhaul: New FSB:%d Mult(x10):%d\n", + newfsb, clock_ratio[clock_ratio_index]); + + bits = clock_ratio_index; + /* "bits" contains the bitpattern of the new multiplier. + we now need to transform it to the desired format. */ + + switch (longhaul) { + case 1: + rdmsr (MSR_VIA_BCR2, lo, hi); + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= ~(1<<23|1<<24|1<<25|1<<26); + lo |= (1<<19); /* Enable software clock multiplier */ + lo |= (bits<<23); /* desired multiplier */ + lo |= revkey; + wrmsr (MSR_VIA_BCR2, lo, hi); + + __hlt(); + + /* Disable software clock multiplier */ + rdmsr (MSR_VIA_BCR2, lo, hi); + lo &= ~(1<<19); + lo |= revkey; + wrmsr (MSR_VIA_BCR2, lo, hi); + break; + + case 2: + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= 0xfff0bf0f; /* reset [19:16,14](bus ratio) and [7:4](rev key) to 0 */ + lo |= (bits<<16); + lo |= (1<<8); /* EnableSoftBusRatio */ + lo |= revkey; + + if (can_scale_voltage) { + if (can_scale_fsb==1) { + dprintk (KERN_INFO "longhaul: Voltage scaling + FSB scaling not done yet.\n"); + goto bad_voltage; + } else { + /* PB: TODO fix this up */ + vidindex = (((highest_speed-lowest_speed) / (newfsb/2)) - + ((highest_speed-((clock_ratio[clock_ratio_index] * newfsb * 100)/1000)) / (newfsb/2))); + } + for (i=0;i<32;i++) { + dprintk (KERN_INFO "VID hunting. 
Looking for %d, found %d\n", + minvid+(vidindex*25), voltage_table[i]); + if (voltage_table[i]==(minvid + (vidindex * 25))) + break; + } + if (i==32) + goto bad_voltage; + + dprintk (KERN_INFO "longhaul: Desired vid index=%d\n", i); +#if 0 + lo &= 0xfe0fffff;/* reset [24:20](voltage) to 0 */ + lo |= (i<<20); /* set voltage */ + lo |= (1<<9); /* EnableSoftVID */ +#endif + } + +bad_voltage: + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + __hlt(); + + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + lo &= ~(1<<8); + if (can_scale_voltage) + lo &= ~(1<<9); + lo |= revkey; + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + break; + + case 3: + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= 0xfff0bf0f; /* reset longhaul[19:16,14] to 0 */ + lo |= (bits<<16); + lo |= (1<<8); /* EnableSoftBusRatio */ + lo |= revkey; + + /* Set FSB */ + if (can_scale_fsb==1) { + lo &= ~(1<<28|1<<29); + switch (newfsb) { + case 66: lo |= (1<<28|1<<29); /* 11 */ + break; + case 100: lo |= 1<<28; /* 01 */ + break; + case 133: break; /* 00*/ + } + } + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + __hlt(); + + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + lo &= ~(1<<8); + lo |= revkey; + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + break; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +} + + +static void __init longhaul_get_ranges (void) +{ + unsigned long lo, hi, invalue; + unsigned int minmult=0, maxmult=0, minfsb=0, maxfsb=0; + unsigned int multipliers[32]= { + 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65, + -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 }; + unsigned int fsb_table[4] = { 133, 100, -1, 66 }; + + switch (longhaul) { + case 1: + /* Ugh, Longhaul v1 didn't have the min/max MSRs. + Assume min=3.0x & max = whatever we booted at. */ + minmult = 30; + maxmult = longhaul_get_cpu_mult(); + minfsb = maxfsb = current_fsb; + break; + + case 2 ... 3: + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + + invalue = (hi & (1<<0|1<<1|1<<2|1<<3)); + if (hi & (1<<11)) + invalue += 16; + maxmult=multipliers[invalue]; + +#if 0 /* This is MaxMhz @ Min Voltage. 
Ignore for now */ + invalue = (hi & (1<<16|1<<17|1<<18|1<<19)) >> 16; + if (hi & (1<<27)) + invalue += 16; + minmult = multipliers[invalue]; +#else + minmult = 30; /* as per spec */ +#endif + + if (can_scale_fsb==1) { + invalue = (hi & (1<<9|1<<10)) >> 9; + maxfsb = fsb_table[invalue]; + + invalue = (hi & (1<<25|1<<26)) >> 25; + minfsb = fsb_table[invalue]; + + dprintk (KERN_INFO "longhaul: Min FSB=%d Max FSB=%d\n", + minfsb, maxfsb); + } else { + minfsb = maxfsb = current_fsb; + } + break; + } + + highest_speed = maxmult * maxfsb * 100; + lowest_speed = minmult * minfsb * 100; + + dprintk (KERN_INFO "longhaul: MinMult(x10)=%d MaxMult(x10)=%d\n", + minmult, maxmult); + dprintk (KERN_INFO "longhaul: Lowestspeed=%d Highestspeed=%d\n", + lowest_speed, highest_speed); +} + + +static void __init longhaul_setup_voltagescaling (unsigned long lo, unsigned long hi) +{ + int revkey; + + can_scale_voltage = 1; + + minvid = (hi & (1<<20|1<<21|1<<22|1<<23|1<<24)) >> 20; /* 56:52 */ + maxvid = (hi & (1<<4|1<<5|1<<6|1<<7|1<<8)) >> 4; /* 40:36 */ + vrmrev = (lo & (1<<15))>>15; + + if (vrmrev==0) { + dprintk (KERN_INFO "longhaul: VRM 8.5 : "); + memcpy (voltage_table, vrm85scales, sizeof(voltage_table)); + numvscales = (voltage_table[maxvid]-voltage_table[minvid])/25; + } else { + dprintk (KERN_INFO "longhaul: Mobile VRM : "); + memcpy (voltage_table, mobilevrmscales, sizeof(voltage_table)); + numvscales = (voltage_table[maxvid]-voltage_table[minvid])/5; + } + + /* Current voltage isn't readable at first, so we need to + set it to a known value. The spec says to use maxvid */ + revkey = (lo & 0xf)<<4; /* Rev key. */ + lo &= 0xfe0fff0f; /* Mask unneeded bits */ + lo |= (1<<9); /* EnableSoftVID */ + lo |= revkey; /* Reinsert key */ + lo |= maxvid << 20; + wrmsr (MSR_VIA_LONGHAUL, lo, hi); + minvid = voltage_table[minvid]; + maxvid = voltage_table[maxvid]; + dprintk ("Min VID=%d.%03d Max VID=%d.%03d, %d possible voltage scales\n", + maxvid/1000, maxvid%1000, minvid/1000, minvid%1000, numvscales); +} + + +static inline unsigned int longhaul_statecount_fsb(struct cpufreq_policy *policy, unsigned int fsb) { + unsigned int i, count = 0; + + for(i=0; imax) && + ((clock_ratio[i] * fsb * 100) >= policy->min)) + count++; + } + + return count; +} + + +static int longhaul_verify(struct cpufreq_policy *policy) +{ + unsigned int number_states = 0; + unsigned int i; + unsigned int fsb_index = 0; + unsigned int tmpfreq = 0; + unsigned int newmax = -1; + + if (!policy || !longhaul_driver) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, lowest_speed, highest_speed); + + if (can_scale_fsb==1) { + for (fsb_index=0; fsb_search_table[fsb_index]!=-1; fsb_index++) + number_states += longhaul_statecount_fsb(policy, fsb_search_table[fsb_index]); + } else + number_states = longhaul_statecount_fsb(policy, current_fsb); + + if (number_states) + return 0; + + /* get frequency closest above current policy->max */ + if (can_scale_fsb==1) { + for (fsb_index=0; fsb_search_table[fsb_index] != -1; fsb_index++) + for(i=0; i policy->max) && + (tmpfreq < newmax)) + newmax = tmpfreq; + } + } else { + for(i=0; i policy->max) && + (tmpfreq < newmax)) + newmax = tmpfreq; + } + } + + policy->max = newmax; + + cpufreq_verify_within_limits(policy, lowest_speed, highest_speed); + + return 0; +} + + +static int longhaul_get_best_freq_for_fsb(struct cpufreq_policy *policy, + unsigned int min_mult, + unsigned int max_mult, + unsigned int fsb, + unsigned int *new_mult) +{ + unsigned int optimal = 0; + unsigned int found_optimal 
= 0; + unsigned int i; + + switch(policy->policy) { + case CPUFREQ_POLICY_POWERSAVE: + optimal = max_mult; + break; + case CPUFREQ_POLICY_PERFORMANCE: + optimal = min_mult; + } + + for(i=0; i policy->max) || + (freq < policy->min)) + continue; + switch(policy->policy) { + case CPUFREQ_POLICY_POWERSAVE: + if (clock_ratio[i] < clock_ratio[optimal]) { + found_optimal = 1; + optimal = i; + } + break; + case CPUFREQ_POLICY_PERFORMANCE: + if (clock_ratio[i] > clock_ratio[optimal]) { + found_optimal = 1; + optimal = i; + } + break; + } + } + + if (found_optimal) { + *new_mult = optimal; + return 1; + } + return 0; +} + + +static int longhaul_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int i; + unsigned int fsb_index = 0; + unsigned int new_fsb = 0; + unsigned int new_clock_ratio = 0; + unsigned int min_mult = 0; + unsigned int max_mult = 0; + + + if (!longhaul_driver) + return -EINVAL; + + if (policy->policy==CPUFREQ_POLICY_PERFORMANCE) + fsb_search_table = perf_fsb_table; + else + fsb_search_table = power_fsb_table; + + for(i=0;i clock_ratio[i]) + min_mult = i; + } + + if (can_scale_fsb==1) { + unsigned int found = 0; + for (fsb_index=0; fsb_search_table[fsb_index]!=-1; fsb_index++) + { + if (longhaul_get_best_freq_for_fsb(policy, + min_mult, max_mult, + fsb_search_table[fsb_index], + &new_clock_ratio)) { + new_fsb = fsb_search_table[fsb_index]; + break; + } + } + if (!found) + return -EINVAL; + } else { + new_fsb = current_fsb; + if (!longhaul_get_best_freq_for_fsb(policy, min_mult, + max_mult, new_fsb, &new_clock_ratio)) + return -EINVAL; + } + + longhaul_setstate(new_clock_ratio, new_fsb); + + return 0; +} + + +static int __init longhaul_init (void) +{ + struct cpuinfo_x86 *c = cpu_data; + unsigned int currentspeed; + static int currentmult; + unsigned long lo, hi; + int ret; + struct cpufreq_driver *driver; + + if ((c->x86_vendor != X86_VENDOR_CENTAUR) || (c->x86 !=6) ) + return -ENODEV; + + switch (c->x86_model) { + case 6: /* VIA C3 Samuel C5A */ + longhaul=1; + memcpy (clock_ratio, longhaul1_clock_ratio, sizeof(longhaul1_clock_ratio)); + memcpy (eblcr_table, samuel1_eblcr, sizeof(samuel1_eblcr)); + break; + + case 7: /* C5B / C5C */ + switch (c->x86_mask) { + case 0: + longhaul=1; + memcpy (clock_ratio, longhaul1_clock_ratio, sizeof(longhaul1_clock_ratio)); + memcpy (eblcr_table, samuel2_eblcr, sizeof(samuel2_eblcr)); + break; + case 1 ... 15: + longhaul=2; + memcpy (clock_ratio, longhaul2_clock_ratio, sizeof(longhaul2_clock_ratio)); + memcpy (eblcr_table, ezra_eblcr, sizeof(ezra_eblcr)); + break; + } + break; + + case 8: /* C5M/C5N */ + return -ENODEV; // Waiting on updated docs from VIA before this is usable + longhaul=3; + numscales=32; + memcpy (clock_ratio, longhaul3_clock_ratio, sizeof(longhaul3_clock_ratio)); + memcpy (eblcr_table, c5m_eblcr, sizeof(c5m_eblcr)); + break; + + default: + printk (KERN_INFO "longhaul: Unknown VIA CPU. Contact davej@suse.de\n"); + return -ENODEV; + } + + printk (KERN_INFO "longhaul: VIA CPU detected. 
Longhaul version %d supported\n", longhaul); + + current_fsb = longhaul_get_cpu_fsb(); + currentmult = longhaul_get_cpu_mult(); + currentspeed = currentmult * current_fsb * 100; + + dprintk (KERN_INFO "longhaul: CPU currently at %dMHz (%d x %d.%d)\n", + (currentspeed/1000), current_fsb, currentmult/10, currentmult%10); + + if (longhaul==2 || longhaul==3) { + rdmsr (MSR_VIA_LONGHAUL, lo, hi); + if ((lo & (1<<0)) && (dont_scale_voltage==0)) + longhaul_setup_voltagescaling (lo, hi); + + if ((lo & (1<<1)) && (dont_scale_fsb==0) && (current_fsb==0)) + can_scale_fsb = 1; + } + + longhaul_get_ranges(); + + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = currentspeed; +#endif + + driver->verify = &longhaul_verify; + driver->setpolicy = &longhaul_setpolicy; + + driver->policy[0].cpu = 0; + driver->policy[0].min = (unsigned int) lowest_speed; + driver->policy[0].max = (unsigned int) highest_speed; + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[0].cpuinfo.min_freq = (unsigned int) lowest_speed; + driver->policy[0].cpuinfo.max_freq = (unsigned int) highest_speed; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + longhaul_driver = driver; + + ret = cpufreq_register(driver); + if (ret) { + longhaul_driver = NULL; + kfree(driver); + } + + return ret; +} + + +static void __exit longhaul_exit (void) +{ + if (longhaul_driver) { + cpufreq_unregister(); + kfree(longhaul_driver); + } +} + +MODULE_PARM (dont_scale_fsb, "i"); +MODULE_PARM (dont_scale_voltage, "i"); +MODULE_PARM (current_fsb, "i"); + +MODULE_AUTHOR ("Dave Jones "); +MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors."); +MODULE_LICENSE ("GPL"); + +module_init(longhaul_init); +module_exit(longhaul_exit); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/longrun.c linux.21pre4-ac2/arch/i386/kernel/longrun.c --- linux.21pre4/arch/i386/kernel/longrun.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/longrun.c 2003-01-10 16:25:37.000000000 +0000 @@ -0,0 +1,292 @@ +/* + * $Id: longrun.c,v 1.18 2003/01/02 22:16:26 db Exp $ + * + * (C) 2002 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +static struct cpufreq_driver *longrun_driver; + +/** + * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz + * values into per cent values. 
In TMTA microcode, the following is valid: + * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) + */ +static unsigned int longrun_low_freq, longrun_high_freq; + + +/** + * longrun_get_policy - get the current LongRun policy + * @policy: struct cpufreq_policy where current policy is written into + * + * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS + * and MSR_TMTA_LONGRUN_CTRL + */ +static void longrun_get_policy(struct cpufreq_policy *policy) +{ + u32 msr_lo, msr_hi; + + if (!longrun_driver) + return; + + rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + if (msr_lo & 0x01) + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + else + policy->policy = CPUFREQ_POLICY_POWERSAVE; + + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + msr_lo &= 0x0000007F; + msr_hi &= 0x0000007F; + + policy->min = longrun_low_freq + msr_lo * + ((longrun_high_freq - longrun_low_freq) / 100); + policy->max = longrun_low_freq + msr_hi * + ((longrun_high_freq - longrun_low_freq) / 100); + policy->cpu = 0; +} + + +/** + * longrun_set_policy - sets a new CPUFreq policy + * @policy - new policy + * + * Sets a new CPUFreq policy on LongRun-capable processors. This function + * has to be called with cpufreq_driver locked. + */ +static int longrun_set_policy(struct cpufreq_policy *policy) +{ + u32 msr_lo, msr_hi; + u32 pctg_lo, pctg_hi; + + if (!longrun_driver || !policy) + return -EINVAL; + + pctg_lo = (policy->min - longrun_low_freq) / + ((longrun_high_freq - longrun_low_freq) / 100); + pctg_hi = (policy->max - longrun_low_freq) / + ((longrun_high_freq - longrun_low_freq) / 100); + + if (pctg_hi > 100) + pctg_hi = 100; + if (pctg_lo > pctg_hi) + pctg_lo = pctg_hi; + + /* performance or economy mode */ + rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + msr_lo &= 0xFFFFFFFE; + switch (policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + msr_lo |= 0x00000001; + break; + case CPUFREQ_POLICY_POWERSAVE: + break; + } + wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); + + /* lower and upper boundary */ + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + msr_lo &= 0xFFFFFF80; + msr_hi &= 0xFFFFFF80; + msr_lo |= pctg_lo; + msr_hi |= pctg_hi; + wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + + return 0; +} + + +/** + * longrun_verify_policy - verifies a new CPUFreq policy + * + * Validates a new CPUFreq policy. This function has to be called with + * cpufreq_driver locked. + */ +static int longrun_verify_policy(struct cpufreq_policy *policy) +{ + if (!policy || !longrun_driver) + return -EINVAL; + + policy->cpu = 0; + cpufreq_verify_within_limits(policy, + longrun_driver->policy[0].cpuinfo.min_freq, + longrun_driver->policy[0].cpuinfo.max_freq); + + return 0; +} + + +/** + * longrun_determine_freqs - determines the lowest and highest possible core frequency + * + * Determines the lowest and highest possible core frequencies on this CPU.
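
longrun_get_policy() and longrun_set_policy() above translate between the 0..100 window kept in MSR_TMTA_LONGRUN_CTRL and kHz limits using the performance_pctg relation quoted at the top of this file. A standalone sketch of the two integer conversions (the frequency bounds are invented example values):

#include <stdio.h>

/* performance_pctg = (freq - low) / (high - low), expressed as 0..100.
 * Both helpers mirror the integer arithmetic used by the driver. */
static unsigned int pct_to_khz(unsigned int pct, unsigned int low, unsigned int high)
{
        return low + pct * ((high - low) / 100);
}

static unsigned int khz_to_pct(unsigned int khz, unsigned int low, unsigned int high)
{
        return (khz - low) / ((high - low) / 100);
}

int main(void)
{
        unsigned int low = 300000, high = 600000;       /* kHz, example only */

        printf("40%% -> %u kHz\n", pct_to_khz(40, low, high));          /* 420000 */
        printf("%u kHz -> %u%%\n", high, khz_to_pct(high, low, high));  /* 100 */
        return 0;
}
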
+ * This is neccessary to calculate the performance percentage according to + * TMTA rules: + * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq) + */ +static unsigned int __init longrun_determine_freqs(unsigned int *low_freq, + unsigned int *high_freq) +{ + u32 msr_lo, msr_hi; + u32 save_lo, save_hi; + u32 eax, ebx, ecx, edx; + struct cpuinfo_x86 *c = cpu_data; + + if (!low_freq || !high_freq) + return -EINVAL; + + if (cpu_has(c, X86_FEATURE_LRTI)) { + /* if the LongRun Table Interface is present, the + * detection is a bit easier: + * For minimum frequency, read out the maximum + * level (msr_hi), write that into "currently + * selected level", and read out the frequency. + * For maximum frequency, read out level zero. + */ + /* minimum */ + rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi); + wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi); + rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); + *low_freq = msr_lo * 1000; /* to kHz */ + + /* maximum */ + wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi); + rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); + *high_freq = msr_lo * 1000; /* to kHz */ + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + return 0; + } + + /* set the upper border to the value determined during TSC init */ + *high_freq = (cpu_khz / 1000); + *high_freq = *high_freq * 1000; + + /* get current borders */ + rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + save_lo = msr_lo & 0x0000007F; + save_hi = msr_hi & 0x0000007F; + + /* if current perf_pctg is larger than 90%, we need to decrease the + * upper limit to make the calculation more accurate. + */ + cpuid(0x80860007, &eax, &ebx, &ecx, &edx); + if (ecx > 90) { + /* set to 0 to 80 perf_pctg */ + msr_lo &= 0xFFFFFF80; + msr_hi &= 0xFFFFFF80; + msr_lo |= 0; + msr_hi |= 80; + wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); + + /* read out current core MHz and current perf_pctg */ + cpuid(0x80860007, &eax, &ebx, &ecx, &edx); + + /* restore values */ + wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); + } + + /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) + * eqals + * low_freq * ( 1 - perf_pctg) = (cur_freq - high_freq * perf_pctg) + * + * high_freq * perf_pctg is stored tempoarily into "ebx". + */ + ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */ + + if ((ecx > 95) || (ecx == 0) || (eax < ebx)) + return -EIO; + + edx = (eax - ebx) / (100 - ecx); + *low_freq = edx * 1000; /* back to kHz */ + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + + return 0; +} + + +/** + * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver + * + * Initializes the LongRun support. 
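
longrun_determine_freqs() above rearranges the same relation to solve for the unknown lower bound, low * (1 - pctg) = cur - high * pctg, with the current frequency and percentage read via CPUID 0x80860007. A sketch of that algebra in MHz with invented numbers (the kernel code additionally narrows the LongRun window first when the reported percentage is above 90, and bails out when it is 0 or above 95):

#include <stdio.h>

/* perf_pctg = (cur - low) / (high - low)        (pctg as a fraction)
 * =>  low = (cur - high * pctg/100) / (1 - pctg/100)
 *         = (100 * cur - high * pctg) / (100 - pctg), for pctg < 100. */
static unsigned int low_freq_mhz(unsigned int cur_mhz, unsigned int high_mhz,
                                 unsigned int pctg)
{
        return (100 * cur_mhz - high_mhz * pctg) / (100 - pctg);
}

int main(void)
{
        /* A 600 MHz part currently at 450 MHz and 50%:
         * low = (45000 - 30000) / 50 = 300 MHz, and indeed
         * (450 - 300) / (600 - 300) = 50%.                   */
        printf("low = %u MHz\n", low_freq_mhz(450, 600, 50));
        return 0;
}
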
+ */ +static int __init longrun_init(void) +{ + int result; + struct cpufreq_driver *driver; + struct cpuinfo_x86 *c = cpu_data; + + if (c->x86_vendor != X86_VENDOR_TRANSMETA || + !cpu_has(c, X86_FEATURE_LONGRUN)) + return 0; + + /* initialization of main "cpufreq" code*/ + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + if (longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq)) { + kfree(driver); + return -EIO; + } + driver->policy[0].cpuinfo.min_freq = longrun_low_freq; + driver->policy[0].cpuinfo.max_freq = longrun_high_freq; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + longrun_get_policy(&driver->policy[0]); + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = longrun_high_freq; /* dummy value */ +#endif + + driver->verify = &longrun_verify_policy; + driver->setpolicy = &longrun_set_policy; + + longrun_driver = driver; + + result = cpufreq_register(driver); + if (result) { + longrun_driver = NULL; + kfree(driver); + } + + return result; +} + + +/** + * longrun_exit - unregisters LongRun support + */ +static void __exit longrun_exit(void) +{ + if (longrun_driver) { + cpufreq_unregister(); + kfree(longrun_driver); + } +} + + +MODULE_AUTHOR ("Dominik Brodowski "); +MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe processors."); +MODULE_LICENSE ("GPL"); +module_init(longrun_init); +module_exit(longrun_exit); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/Makefile linux.21pre4-ac2/arch/i386/kernel/Makefile --- linux.21pre4/arch/i386/kernel/Makefile 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/Makefile 2003-01-10 16:26:51.000000000 +0000 @@ -40,5 +40,14 @@ obj-$(CONFIG_X86_LOCAL_APIC) += mpparse.o apic.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o acpitable.o obj-$(CONFIG_X86_VISWS_APIC) += visws_apic.o +obj-$(CONFIG_EDD) += edd.o +obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o +obj-$(CONFIG_X86_LONGHAUL) += longhaul.o +obj-$(CONFIG_X86_SPEEDSTEP) += speedstep.o +obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o +obj-$(CONFIG_X86_LONGRUN) += longrun.o +obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o +obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o + include $(TOPDIR)/Rules.make diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/microcode.c linux.21pre4-ac2/arch/i386/kernel/microcode.c --- linux.21pre4/arch/i386/kernel/microcode.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/microcode.c 2003-01-28 16:19:28.000000000 +0000 @@ -55,7 +55,7 @@ * Tigran Aivazian , * Serialize updates as required on HT processors due to speculative * nature of implementation. - * 1.11 22 Mar 2001 Tigran Aivazian + * 1.11 22 Mar 2002 Tigran Aivazian * Fix the panic when writing zero-length microcode chunk. 
*/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/mpparse.c linux.21pre4-ac2/arch/i386/kernel/mpparse.c --- linux.21pre4/arch/i386/kernel/mpparse.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/mpparse.c 2003-01-29 17:11:52.000000000 +0000 @@ -126,6 +126,8 @@ case 0x0F: if (model == 0x00) return("Pentium 4(tm)"); + if (model == 0x01) + return("Pentium 4(tm)"); if (model == 0x02) return("Pentium 4(tm) XEON(tm)"); if (model == 0x0F) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/mtrr.c linux.21pre4-ac2/arch/i386/kernel/mtrr.c --- linux.21pre4/arch/i386/kernel/mtrr.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/mtrr.c 2003-01-29 17:12:01.000000000 +0000 @@ -492,6 +492,14 @@ return 0; } } + /* Intel 450NX errata # 23. No ascending cachline evictions to + write combining memory may resulting in data corruption */ + dev = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, NULL); + if(dev) + { + printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); + return 0; + } switch ( mtrr_if ) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/p4-clockmod.c linux.21pre4-ac2/arch/i386/kernel/p4-clockmod.c --- linux.21pre4/arch/i386/kernel/p4-clockmod.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/p4-clockmod.c 2003-01-10 16:27:32.000000000 +0000 @@ -0,0 +1,287 @@ +/* + * Pentium 4/Xeon CPU on demand clock modulation/speed scaling + * (C) 2002 Zwane Mwaikambo + * (C) 2002 Arjan van de Ven + * (C) 2002 Tora T. Engstad + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. 
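
The p4-clockmod driver that follows throttles the CPU by programming the on-demand clock-modulation duty cycle, so state n corresponds to roughly stock_freq * n / 8; the two lowest settings are skipped on steppings with the N44/O17 erratum listed in this header. A rough illustration of the resulting table (stock_freq here is an invented value; the driver takes it from cpu_khz or the stock_freq module parameter):

#include <stdio.h>

/* Duty-cycle state n (1..7) gives roughly stock_freq * n / 8 in kHz;
 * state 8 (DC_DISABLE) switches modulation off entirely. */
int main(void)
{
        unsigned int stock_freq = 2000000;      /* kHz, example value only */
        unsigned int state;

        for (state = 1; state <= 8; state++)
                printf("state %u -> %7u kHz (%u.%u%% duty cycle)\n", state,
                       stock_freq * state / 8,
                       state * 125 / 10, state * 125 % 10);
        return 0;
}
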
+ * + * Date Errata Description + * 20020525 N44, O17 12.5% or 25% DC causes lockup + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define PFX "cpufreq: " + +/* + * Duty Cycle (3bits), note DC_DISABLE is not specified in + * intel docs i just use it to mean disable + */ +enum { + DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, + DC_64PT, DC_75PT, DC_88PT, DC_DISABLE +}; + +#define DC_ENTRIES 8 + + +static int has_N44_O17_errata; +static int stock_freq; +MODULE_PARM(stock_freq, "i"); + +static struct cpufreq_driver *cpufreq_p4_driver; + + +static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) +{ + u32 l, h; + unsigned long cpus_allowed; + struct cpufreq_freqs freqs; + int hyperthreading = 0; + int affected_cpu_map = 0; + int sibling = 0; + + if (!cpu_online(cpu) || (newstate > DC_DISABLE) || + (newstate == DC_RESV)) + return -EINVAL; + + /* switch to physical CPU where state is to be changed*/ + cpus_allowed = current->cpus_allowed; + + /* only run on CPU to be set, or on its sibling */ + affected_cpu_map = 1 << cpu; +#ifdef CONFIG_X86_HT + hyperthreading = ((cpu_has_ht) && (smp_num_siblings == 2)); + if (hyperthreading) { + sibling = cpu_sibling_map[cpu]; + affected_cpu_map |= (1 << sibling); + } +#endif + set_cpus_allowed(current, affected_cpu_map); + BUG_ON(!(affected_cpu_map & (1 << smp_processor_id()))); + + /* get current state */ + rdmsr(MSR_IA32_THERM_CONTROL, l, h); + if (l & 0x10) { + l = l >> 1; + l &= 0x7; + } else + l = DC_DISABLE; + + if (l == newstate) { + set_cpus_allowed(current, cpus_allowed); + return 0; + } else if (l == DC_RESV) { + printk(KERN_ERR PFX "BIG FAT WARNING: currently in invalid setting\n"); + } + + /* notifiers */ + freqs.old = stock_freq * l / 8; + freqs.new = stock_freq * newstate / 8; + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + if (hyperthreading) { + freqs.cpu = sibling; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + rdmsr(MSR_IA32_THERM_STATUS, l, h); + if (l & 0x01) + printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu); + + if (has_N44_O17_errata && (newstate == DC_25PT || newstate == DC_DFLT)) + newstate = DC_38PT; + + rdmsr(MSR_IA32_THERM_CONTROL, l, h); + if (newstate == DC_DISABLE) { + printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu); + wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); + } else { + printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10)); + /* bits 63 - 5 : reserved + * bit 4 : enable/disable + * bits 3-1 : duty cycle + * bit 0 : reserved + */ + l = (l & ~14); + l = l | (1<<4) | ((newstate & 0x7)<<1); + wrmsr(MSR_IA32_THERM_CONTROL, l, h); + } + + set_cpus_allowed(current, cpus_allowed); + + /* notifiers */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + if (hyperthreading) { + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return 0; +} + + +static struct cpufreq_frequency_table p4clockmod_table[] = { + {DC_RESV, CPUFREQ_ENTRY_INVALID}, + {DC_DFLT, 0}, + {DC_25PT, 0}, + {DC_38PT, 0}, + {DC_50PT, 0}, + {DC_64PT, 0}, + {DC_75PT, 0}, + {DC_88PT, 0}, + {DC_DISABLE, 0}, + {DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static int cpufreq_p4_setpolicy(struct cpufreq_policy *policy) +{ + unsigned int newstate = DC_RESV; + + if (cpufreq_frequency_table_setpolicy(policy, &p4clockmod_table[0], &newstate)) + return -EINVAL; + + cpufreq_p4_setdc(policy->cpu, newstate); + + return 0; +} + + +static int 
cpufreq_p4_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); +} + + +static int __init cpufreq_p4_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + int cpuid; + int ret; + struct cpufreq_driver *driver; + unsigned int i; + + /* + * THERM_CONTROL is architectural for IA32 now, so + * we can rely on the capability checks + */ + if (c->x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + + if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) || + !test_bit(X86_FEATURE_ACC, c->x86_capability)) + return -ENODEV; + + /* Errata workarounds */ + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + switch (cpuid) { + case 0x0f07: + case 0x0f0a: + case 0x0f11: + case 0x0f12: + has_N44_O17_errata = 1; + default: + break; + } + + printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n"); + + if (!stock_freq) { + if (cpu_khz) + stock_freq = cpu_khz; + else { + printk(KERN_INFO PFX "unknown core frequency - please use module parameter 'stock_freq'\n"); + return -EINVAL; + } + } + + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + /* table init */ + for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if ((i<2) && (has_N44_O17_errata)) + p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID; + else + p4clockmod_table[i].frequency = (stock_freq * i)/8; + } + + +#ifdef CONFIG_CPU_FREQ_24_API + for (i=0;icpu_cur_freq[i] = stock_freq; + } +#endif + + driver->verify = &cpufreq_p4_verify; + driver->setpolicy = &cpufreq_p4_setpolicy; + + for (i=0;ipolicy[i].cpu = i; + ret = cpufreq_frequency_table_cpuinfo(&driver->policy[i], &p4clockmod_table[0]); + if (ret) { + kfree(driver); + return ret; + } + driver->policy[i].policy = CPUFREQ_POLICY_PERFORMANCE; + driver->policy[i].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + } + + cpufreq_p4_driver = driver; + + ret = cpufreq_register(driver); + if (ret) { + cpufreq_p4_driver = NULL; + kfree(driver); + } + + return ret; +} + + +static void __exit cpufreq_p4_exit(void) +{ + unsigned int i; + + if (cpufreq_p4_driver) { + for (i=0; i"); +MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); +MODULE_LICENSE ("GPL"); + +module_init(cpufreq_p4_init); +module_exit(cpufreq_p4_exit); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/pci-dma.c linux.21pre4-ac2/arch/i386/kernel/pci-dma.c --- linux.21pre4/arch/i386/kernel/pci-dma.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/pci-dma.c 2003-01-06 16:22:34.000000000 +0000 @@ -19,7 +19,7 @@ void *ret; int gfp = GFP_ATOMIC; - if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff)) + if (hwdev == NULL || hwdev->dma_mask < 0xffffffff) gfp |= GFP_DMA; ret = (void *)__get_free_pages(gfp, get_order(size)); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/pci-irq.c linux.21pre4-ac2/arch/i386/kernel/pci-irq.c --- linux.21pre4/arch/i386/kernel/pci-irq.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/pci-irq.c 2003-01-31 11:18:37.000000000 +0000 @@ -779,9 +779,16 @@ void pcibios_enable_irq(struct pci_dev *dev) { u8 pin; + extern int interrupt_line_quirk; + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { char *msg; + + /* With IDE legacy devices the IRQ lookup failure is 
not a problem.. */ + if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) + return; + if (io_apic_assign_pci_irqs) msg = " Probably buggy MP table."; else if (pci_probe & PCI_BIOS_IRQ_SCAN) @@ -791,4 +798,9 @@ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", 'A' + pin - 1, dev->slot_name, msg); } + /* VIA bridges use interrupt line for apic/pci steering across + the V-Link */ + else if (interrupt_line_quirk) + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/powernow-k6.c linux.21pre4-ac2/arch/i386/kernel/powernow-k6.c --- linux.21pre4/arch/i386/kernel/powernow-k6.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/powernow-k6.c 2003-01-10 16:27:32.000000000 +0000 @@ -0,0 +1,237 @@ +/* + * $Id: powernow-k6.c,v 1.42 2003/01/02 22:41:08 db Exp $ + * This file was part of Powertweak Linux (http://powertweak.sf.net) + * and is shared with the Linux Kernel module. + * + * (C) 2000-2002 Dave Jones, Arjan van de Ven, Janne Pänkälä, Dominik Brodowski. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long + as it is unused */ + +static struct cpufreq_driver *powernow_driver; +static unsigned int busfreq; /* FSB, in 10 kHz */ +static unsigned int max_multiplier; + + +/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ +static struct cpufreq_frequency_table clock_ratio[] = { + {45, /* 000 -> 4.5x */ 0}, + {50, /* 001 -> 5.0x */ 0}, + {40, /* 010 -> 4.0x */ 0}, + {55, /* 011 -> 5.5x */ 0}, + {20, /* 100 -> 2.0x */ 0}, + {30, /* 101 -> 3.0x */ 0}, + {60, /* 110 -> 6.0x */ 0}, + {35, /* 111 -> 3.5x */ 0}, + {0, CPUFREQ_TABLE_END} +}; + + +/** + * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier + * + * Returns the current setting of the frequency multiplier. Core clock + * speed is frequency of the Front-Side Bus multiplied with this value. + */ +static int powernow_k6_get_cpu_multiplier(void) +{ + u64 invalue = 0; + u32 msrval; + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue=inl(POWERNOW_IOPORT + 0x8); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + return clock_ratio[(invalue >> 5)&7].index; +} + + +/** + * powernow_k6_set_state - set the PowerNow! multiplier + * @best_i: clock_ratio[best_i] is the target multiplier + * + * Tries to change the PowerNow! 
multiplier + */ +static void powernow_k6_set_state (unsigned int best_i) +{ + unsigned long outvalue=0, invalue=0; + unsigned long msrval; + struct cpufreq_freqs freqs; + + if (!powernow_driver) { + printk(KERN_ERR "cpufreq: initialization problem or invalid target frequency\n"); + return; + } + + freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); + freqs.new = busfreq * clock_ratio[best_i].index; + freqs.cpu = 0; /* powernow-k6.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* we now need to transform best_i to the BVC format, see AMD#23446 */ + + outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue=inl(POWERNOW_IOPORT + 0x8); + invalue = invalue & 0xf; + outvalue = outvalue | invalue; + outl(outvalue ,(POWERNOW_IOPORT + 0x8)); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return; +} + + +/** + * powernow_k6_verify - verifies a new CPUfreq policy + * @policy: new policy + * + * Policy must be within lowest and highest possible CPU Frequency, + * and at least one possible state must be within min and max. + */ +static int powernow_k6_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); +} + + +/** + * powernow_k6_setpolicy - sets a new CPUFreq policy + * @policy - new policy + * + * sets a new CPUFreq policy + */ +static int powernow_k6_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_setpolicy(policy, &clock_ratio[0], &newstate)) + return -EINVAL; + + powernow_k6_set_state(newstate); + + return 0; +} + + +/** + * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver + * + * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported + * devices, -EINVAL or -ENOMEM on problems during initiatization, and zero + * on success. 
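
powernow_k6_set_state() above packs the table index of the new multiplier into the BVC word it writes to the PowerNow! I/O port after unlocking the port through MSR_K6_EPMR, and the driver keeps busfreq in units of 10 kHz so that busfreq times the x10 multiplier gives kHz directly. A small sketch of both calculations (example values only):

#include <stdio.h>

/* busfreq is kept in 10 kHz units and the multiplier times 10,
 * so kHz = busfreq * ratio10 (10000 * 55 = 550000 kHz = 550 MHz). */
static unsigned int k6_khz(unsigned int busfreq_10khz, unsigned int ratio10)
{
        return busfreq_10khz * ratio10;
}

/* BVC word written to the PowerNow! port: bits 12, 10 and 9 set,
 * the multiplier index in bits 5..7, and the low nibble preserved
 * from the current port value (see AMD #23446). */
static unsigned int k6_bvc_word(unsigned int best_i, unsigned int cur_low_nibble)
{
        return (1u << 12) | (1u << 10) | (1u << 9) | (best_i << 5)
               | (cur_low_nibble & 0xfu);
}

int main(void)
{
        printf("5.5x on a 100 MHz bus -> %u kHz\n", k6_khz(10000, 55));
        printf("BVC word for index 3  -> 0x%04x\n", k6_bvc_word(3, 0));
        return 0;
}
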
+ */ +static int __init powernow_k6_init(void) +{ + struct cpuinfo_x86 *c = cpu_data; + struct cpufreq_driver *driver; + unsigned int result; + unsigned int i; + + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || + ((c->x86_model != 12) && (c->x86_model != 13))) + return -ENODEV; + + max_multiplier = powernow_k6_get_cpu_multiplier(); + busfreq = cpu_khz / max_multiplier; + + if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { + printk("cpufreq: PowerNow IOPORT region already used.\n"); + return -EIO; + } + + /* initialization of main "cpufreq" code*/ + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) { + release_region (POWERNOW_IOPORT, 16); + return -ENOMEM; + } + driver->policy = (struct cpufreq_policy *) (driver + 1); + + /* table init */ + for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].index > max_multiplier) + clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; + else + clock_ratio[i].frequency = busfreq * clock_ratio[i].index; + } + + driver->verify = &powernow_k6_verify; + driver->setpolicy = &powernow_k6_setpolicy; + + /* cpuinfo and default policy values */ + driver->policy[0].cpu = 0; + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + driver->policy[0].policy = CPUFREQ_POLICY_PERFORMANCE; +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = busfreq * max_multiplier; +#endif + result = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &clock_ratio[0]); + if (result) { + kfree(driver); + return result; + } + + powernow_driver = driver; + + result = cpufreq_register(driver); + if (result) { + release_region (POWERNOW_IOPORT, 16); + powernow_driver = NULL; + kfree(driver); + } + + return result; +} + + +/** + * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support + * + * Unregisters AMD K6-2+ / K6-3+ PowerNow! support. + */ +static void __exit powernow_k6_exit(void) +{ + unsigned int i; + + if (powernow_driver) { + for (i=0;i<8;i++) + if (clock_ratio[i].index == max_multiplier) + powernow_k6_set_state(i); + cpufreq_unregister(); + kfree(powernow_driver); + } +} + + +MODULE_AUTHOR ("Arjan van de Ven , Dave Jones , Dominik Brodowski "); +MODULE_DESCRIPTION ("PowerNow! 
driver for AMD K6-2+ / K6-3+ processors."); +MODULE_LICENSE ("GPL"); +module_init(powernow_k6_init); +module_exit(powernow_k6_exit); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/process.c linux.21pre4-ac2/arch/i386/kernel/process.c --- linux.21pre4/arch/i386/kernel/process.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/process.c 2003-01-06 19:12:06.000000000 +0000 @@ -124,15 +124,12 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ - init_idle(); - current->nice = 20; - current->counter = -100; while (1) { void (*idle)(void) = pm_idle; if (!idle) idle = default_idle; - while (!current->need_resched) + if (!current->need_resched) idle(); schedule(); check_pgt_cache(); @@ -187,7 +184,7 @@ } /* we will leave sorting out the final value when we are ready to reboot, since we might not - have set up boot_cpu_id or smp_num_cpu */ + have set up boot_cpu_physical_apicid or smp_num_cpu */ break; #endif } @@ -253,7 +250,7 @@ 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */ 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */ 0x74, 0x02, /* jz f */ - 0x0f, 0x08, /* invd */ + 0x0f, 0x09, /* wbinvd */ 0x24, 0x10, /* f: andb $0x10,al */ 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ }; @@ -466,23 +463,6 @@ } /* - * No need to lock the MM as we are the last user - */ -void release_segments(struct mm_struct *mm) -{ - void * ldt = mm->context.segments; - - /* - * free the LDT - */ - if (ldt) { - mm->context.segments = NULL; - clear_LDT(); - vfree(ldt); - } -} - -/* * Create a kernel thread */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) @@ -534,45 +514,19 @@ void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - void * ldt = dead_task->mm->context.segments; - // temporary debugging check - if (ldt) { - printk("WARNING: dead process %8s still has LDT? <%p>\n", - dead_task->comm, ldt); + if (dead_task->mm->context.size) { + printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", + dead_task->comm, + dead_task->mm->context.ldt, + dead_task->mm->context.size); BUG(); } } - release_x86_irqs(dead_task); } /* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. - */ -void copy_segments(struct task_struct *p, struct mm_struct *new_mm) -{ - struct mm_struct * old_mm; - void *old_ldt, *ldt; - - ldt = NULL; - old_mm = current->mm; - if (old_mm && (old_ldt = old_mm->context.segments) != NULL) { - /* - * Completely new LDT, we initialize it from the parent: - */ - ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); - if (!ldt) - printk(KERN_WARNING "ldt allocation failed\n"); - else - memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE); - } - new_mm->context.segments = ldt; - new_mm->context.cpuvalid = ~0UL; /* valid on all CPU's - they can't have stale data */ -} - -/* * Save a segment. */ #define savesegment(seg,value) \ @@ -697,15 +651,17 @@ asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs)); /* - * Restore %fs and %gs. + * Restore %fs and %gs if needed. 
*/ - loadsegment(fs, next->fs); - loadsegment(gs, next->gs); + if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) { + loadsegment(fs, next->fs); + loadsegment(gs, next->gs); + } /* * Now maybe reload the debug registers */ - if (next->debugreg[7]){ + if (unlikely(next->debugreg[7])) { loaddebug(next, 0); loaddebug(next, 1); loaddebug(next, 2); @@ -715,7 +671,7 @@ loaddebug(next, 7); } - if (prev->ioperm || next->ioperm) { + if (unlikely(prev->ioperm || next->ioperm)) { if (next->ioperm) { /* * 4 cachelines copy ... not good, but not that diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/setup.c linux.21pre4-ac2/arch/i386/kernel/setup.c --- linux.21pre4/arch/i386/kernel/setup.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/setup.c 2003-01-28 16:15:24.000000000 +0000 @@ -118,6 +118,7 @@ #include #include #include +#include /* * Machine setup.. */ @@ -196,6 +197,8 @@ #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (*(unsigned long *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) +#define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) +#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) #define COMMAND_LINE ((char *) (PARAM+2048)) #define COMMAND_LINE_SIZE 256 @@ -203,6 +206,7 @@ #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 + #ifdef CONFIG_VISWS char visws_board_type = -1; char visws_board_rev = -1; @@ -700,6 +704,23 @@ return 0; } +#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) +unsigned char eddnr; +struct edd_info edd[EDDMAXNR]; +/** + * copy_edd() - Copy the BIOS EDD information + * from empty_zero_page into a safe place. + * + */ +static inline void copy_edd(void) +{ + eddnr = EDD_NR; + memcpy(edd, EDD_BUF, sizeof(edd)); +} +#else +#define copy_edd() do {} while (0) +#endif + /* * Do NOT EVER look at the BIOS memory size location. * It does not work on many machines. @@ -1112,6 +1133,7 @@ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); #endif setup_memory_region(); + copy_edd(); if (!MOUNT_ROOT_RDONLY) root_mountflags &= ~MS_RDONLY; @@ -2918,6 +2940,7 @@ * applications want to get the raw CPUID data, they should access * /dev/cpu//cpuid instead. */ + extern int phys_proc_id[NR_CPUS]; static char *x86_cap_flags[] = { /* Intel-defined */ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", @@ -2975,6 +2998,11 @@ /* Cache size */ if (c->x86_cache_size >= 0) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + +#ifdef CONFIG_SMP + seq_printf(m, "physical id\t: %d\n",phys_proc_id[n]); + seq_printf(m, "siblings\t: %d\n",smp_num_siblings); +#endif /* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */ fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu); @@ -3077,11 +3105,12 @@ set_tss_desc(nr,t); gdt_table[__TSS(nr)].b &= 0xfffffdff; load_TR(nr); - load_LDT(&init_mm); + load_LDT(&init_mm.context); - /* - * Clear all 6 debug registers: - */ + /* Clear %fs and %gs. 
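For illustration only (not part of the patch): the same __switch_to() hunks wrap the segment, debug-register and ioperm checks in unlikely(), which on compilers that support it expands to GCC's __builtin_expect() so the common no-work path falls straight through. A tiny stand-alone sketch, with the macros written out locally rather than taken from the kernel headers:

#include <stdio.h>

/* Local stand-ins for the kernel's likely()/unlikely() annotations;
 * with a sufficiently new GCC the kernel macros expand the same way. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int debugreg7;           /* stand-in for next->debugreg[7] */

static void switch_debug_state(void)
{
        /* Most tasks never touch the debug registers, so the branch is
         * annotated as unlikely and the fall-through path stays hot. */
        if (unlikely(debugreg7))
                puts("reload debug registers");
}

int main(void)
{
        switch_debug_state();   /* common case: prints nothing   */
        debugreg7 = 1;
        switch_debug_state();   /* rare case: performs the reload */
        return 0;
}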
*/ + asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs"); + + /* Clear all 6 debug registers: */ #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) ); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/smpboot.c linux.21pre4-ac2/arch/i386/kernel/smpboot.c --- linux.21pre4/arch/i386/kernel/smpboot.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/smpboot.c 2003-01-06 19:12:31.000000000 +0000 @@ -58,7 +58,7 @@ /* Number of siblings per CPU package */ int smp_num_siblings = 1; -int __initdata phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ +int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ /* Bitmask of currently online CPUs */ unsigned long cpu_online_map; @@ -365,7 +365,7 @@ * (This works even if the APIC is not enabled.) */ phys_id = GET_APIC_ID(apic_read(APIC_ID)); - cpuid = current->processor; + cpuid = cpu(); if (test_and_set_bit(cpuid, &cpu_online_map)) { printk("huh, phys CPU#%d, CPU#%d already present??\n", phys_id, cpuid); @@ -435,6 +435,7 @@ */ smp_store_cpu_info(cpuid); + disable_APIC_timer(); /* * Allow the master to continue. */ @@ -443,7 +444,7 @@ /* * Synchronize the TSC with the BP */ - if (cpu_has_tsc) + if (cpu_has_tsc > 1) synchronize_tsc_ap(); } @@ -465,6 +466,7 @@ smp_callin(); while (!atomic_read(&smp_commenced)) rep_nop(); + enable_APIC_timer(); /* * low-memory mappings have been cleared, flush them from * the local TLBs too. @@ -803,16 +805,13 @@ if (!idle) panic("No idle process for CPU %d", cpu); - idle->processor = cpu; - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */ + init_idle(idle, cpu); map_cpu_to_boot_apicid(cpu, apicid); idle->thread.eip = (unsigned long) start_secondary; - del_from_runqueue(idle); unhash_process(idle); - init_tasks[cpu] = idle; /* start_eip had better be page-aligned! */ start_eip = setup_trampoline(); @@ -925,6 +924,7 @@ } cycles_t cacheflush_time; +unsigned long cache_decay_ticks; static void smp_tune_scheduling (void) { @@ -958,9 +958,13 @@ cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth; } + cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000; + printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n", (long)cacheflush_time/(cpu_khz/1000), ((long)cacheflush_time*100/(cpu_khz/1000)) % 100); + printk("task migration cache decay timeout: %ld msecs.\n", + (cache_decay_ticks + 1) * 1000 / HZ); } /* @@ -1026,8 +1030,7 @@ map_cpu_to_boot_apicid(0, boot_cpu_apicid); global_irq_holder = 0; - current->processor = 0; - init_idle(); + current->cpu = 0; smp_tune_scheduling(); /* @@ -1219,7 +1222,7 @@ /* * Synchronize the TSC with the AP */ - if (cpu_has_tsc && cpucount) + if (cpu_has_tsc > 1 && cpucount) synchronize_tsc_bp(); smp_done: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/smp.c linux.21pre4-ac2/arch/i386/kernel/smp.c --- linux.21pre4/arch/i386/kernel/smp.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/smp.c 2003-01-06 19:12:38.000000000 +0000 @@ -496,13 +496,23 @@ * it goes straight through and wastes no time serializing * anything. Worst case is that we lose a reschedule ... */ - void smp_send_reschedule(int cpu) { send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR); } /* + * this function sends a reschedule IPI to all (other) CPUs. + * This should only be used if some 'global' task became runnable, + * such as a RT task, that must be handled now. The first CPU + * that manages to grab the task will run it. 
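For illustration only (not part of the patch): the smp_tune_scheduling() hunk above derives the new cache_decay_ticks value from cacheflush_time, which is in CPU cycles; dividing by cpu_khz gives milliseconds, scaling by HZ/1000 gives timer ticks, and the added printk reports (ticks + 1) * 1000 / HZ milliseconds. A stand-alone check of that arithmetic with invented example numbers:

#include <stdio.h>

#define HZ 100  /* typical 2.4-era tick rate on i386 */

int main(void)
{
        unsigned long long cpu_khz   = 500000;  /* 500 MHz CPU, invented          */
        unsigned long long cachesize = 256;     /* L2 size in KB, invented        */
        unsigned long long bandwidth = 100;     /* assumed memory bandwidth, MB/s */

        /* cycles needed to refill the cache, as computed in the hunk above */
        unsigned long long cacheflush_time = (cpu_khz >> 10) * (cachesize << 10) / bandwidth;

        /* cycles -> milliseconds -> scheduler ticks */
        unsigned long long cache_decay_ticks = cacheflush_time / cpu_khz * HZ / 1000;

        printf("timeslice cutoff : %llu usecs\n", cacheflush_time / (cpu_khz / 1000));
        printf("cache decay ticks: %llu\n", cache_decay_ticks);
        printf("decay timeout    : %llu msecs\n", (cache_decay_ticks + 1) * 1000 / HZ);
        return 0;
}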
+ */ +void smp_send_reschedule_all(void) +{ + send_IPI_allbutself(RESCHEDULE_VECTOR); +} + +/* * Structure and data for smp_call_function(). This is designed to minimise * static memory requirements. It also looks cleaner. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/speedstep.c linux.21pre4-ac2/arch/i386/kernel/speedstep.c --- linux.21pre4/arch/i386/kernel/speedstep.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/i386/kernel/speedstep.c 2003-01-10 16:27:32.000000000 +0000 @@ -0,0 +1,731 @@ +/* + * $Id: speedstep.c,v 1.64 2003/01/02 22:16:26 db Exp $ + * + * (C) 2001 Dave Jones, Arjan van de ven. + * (C) 2002 Dominik Brodowski + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon reverse engineered information, and on Intel documentation + * for chipsets ICH2-M and ICH3-M. + * + * Many thanks to Ducrot Bruno for finding and fixing the last + * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler + * for extensive testing. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + + +/********************************************************************* + * SPEEDSTEP - DEFINITIONS * + *********************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include + + +static struct cpufreq_driver *speedstep_driver; + +/* speedstep_chipset: + * It is necessary to know which chipset is used. As accesses to + * this device occur at various places in this module, we need a + * static struct pci_dev * pointing to that device. + */ +static unsigned int speedstep_chipset; +static struct pci_dev *speedstep_chipset_dev; + +#define SPEEDSTEP_CHIPSET_ICH2M 0x00000002 +#define SPEEDSTEP_CHIPSET_ICH3M 0x00000003 + + +/* speedstep_processor + */ +static unsigned int speedstep_processor = 0; +static int speedstep_coppermine = 0; + +#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000001 /* Coppermine core */ +#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000002 /* Tualatin core */ +#define SPEEDSTEP_PROCESSOR_P4M 0x00000003 /* P4-M with 100 MHz FSB */ + + +/* speedstep_[low,high]_freq + * There are only two frequency states for each processor. Values + * are in kHz for the time being. + */ +#define SPEEDSTEP_HIGH 0x00000000 +#define SPEEDSTEP_LOW 0x00000001 + +static struct cpufreq_frequency_table speedstep_freqs[] = { + {SPEEDSTEP_HIGH, 0}, + {SPEEDSTEP_LOW, 0}, + {0, CPUFREQ_TABLE_END}, +}; + +#define speedstep_low_freq speedstep_freqs[SPEEDSTEP_LOW].frequency +#define speedstep_high_freq speedstep_freqs[SPEEDSTEP_HIGH].frequency + + +/* DEBUG + * Define it if you want verbose debug output, e.g. for bug reporting + */ +//#define SPEEDSTEP_DEBUG + +#ifdef SPEEDSTEP_DEBUG +#define dprintk(msg...) printk(msg) +#else +#define dprintk(msg...) do { } while(0) +#endif + + + +/********************************************************************* + * LOW LEVEL CHIPSET INTERFACE * + *********************************************************************/ + +/** + * speedstep_get_state - read the current SpeedStep state + * @state: Speedstep state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) + * + * Tries to read the SpeedStep state. Returns -EIO when there has been + * trouble to read the status or write to the control register, -EINVAL + * on an unsupported chipset, and zero on success. 
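For illustration only (not part of the patch): both new cpufreq drivers describe their operating points in a table terminated by a CPUFREQ_TABLE_END sentinel; powernow_k6_init() above fills each entry with busfreq * multiplier and marks multipliers above the detected maximum as CPUFREQ_ENTRY_INVALID. A self-contained model of that table walk; struct freq_entry, TABLE_END and ENTRY_INVALID are illustrative stand-ins, and the multiplier values are only examples:

#include <stdio.h>

#define TABLE_END     (~0u)             /* stands in for CPUFREQ_TABLE_END     */
#define ENTRY_INVALID (~0u - 1)         /* stands in for CPUFREQ_ENTRY_INVALID */

struct freq_entry {
        unsigned int index;             /* CPU multiplier, scaled by ten       */
        unsigned int frequency;         /* kHz, filled in at init time         */
};

/* Example multipliers in the style of the K6-2+ table. */
static struct freq_entry table[] = {
        { 45, 0 }, { 50, 0 }, { 40, 0 }, { 55, 0 },
        { 20, 0 }, { 30, 0 }, { 60, 0 }, { 35, 0 },
        { 0, TABLE_END },
};

int main(void)
{
        unsigned int max_multiplier = 45;               /* 4.5x, invented           */
        unsigned int cpu_khz = 450000;                  /* current core clock, kHz  */
        unsigned int busfreq = cpu_khz / max_multiplier;/* kHz per 0.1x step        */
        int i;

        for (i = 0; table[i].frequency != TABLE_END; i++) {
                if (table[i].index > max_multiplier)
                        table[i].frequency = ENTRY_INVALID;
                else
                        table[i].frequency = busfreq * table[i].index;
        }

        for (i = 0; table[i].frequency != TABLE_END; i++) {
                if (table[i].frequency == ENTRY_INVALID)
                        printf("%u.%ux -> not available\n",
                               table[i].index / 10, table[i].index % 10);
                else
                        printf("%u.%ux -> %u kHz\n",
                               table[i].index / 10, table[i].index % 10,
                               table[i].frequency);
        }
        return 0;
}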
+ */ +static int speedstep_get_state (unsigned int *state) +{ + unsigned long flags; + u32 pmbase; + u8 value; + + if (!speedstep_chipset_dev || !state) + return -EINVAL; + + switch (speedstep_chipset) { + case SPEEDSTEP_CHIPSET_ICH2M: + case SPEEDSTEP_CHIPSET_ICH3M: + /* get PMBASE */ + pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); + if (!(pmbase & 0x01)) + return -EIO; + + pmbase &= 0xFFFFFFFE; + if (!pmbase) + return -EIO; + + /* read state */ + local_irq_save(flags); + value = inb(pmbase + 0x50); + local_irq_restore(flags); + + dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + *state = value & 0x01; + return 0; + + } + + printk (KERN_ERR "cpufreq: setting CPU frequency on this chipset unsupported.\n"); + return -EINVAL; +} + + +/** + * speedstep_set_state - set the SpeedStep state + * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) + * + * Tries to change the SpeedStep state. + */ +static void speedstep_set_state (unsigned int state, int notify) +{ + u32 pmbase; + u8 pm2_blk; + u8 value; + unsigned long flags; + unsigned int oldstate; + struct cpufreq_freqs freqs; + + if (!speedstep_chipset_dev || (state > 0x1)) + return; + + if (speedstep_get_state(&oldstate)) + return; + + if (oldstate == state) + return; + + freqs.old = (oldstate == SPEEDSTEP_HIGH) ? speedstep_high_freq : speedstep_low_freq; + freqs.new = (state == SPEEDSTEP_HIGH) ? speedstep_high_freq : speedstep_low_freq; + freqs.cpu = 0; /* speedstep.c is UP only driver */ + + if (notify) + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + switch (speedstep_chipset) { + case SPEEDSTEP_CHIPSET_ICH2M: + case SPEEDSTEP_CHIPSET_ICH3M: + /* get PMBASE */ + pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); + if (!(pmbase & 0x01)) + { + printk(KERN_ERR "cpufreq: could not find speedstep register\n"); + return; + } + + pmbase &= 0xFFFFFFFE; + if (!pmbase) { + printk(KERN_ERR "cpufreq: could not find speedstep register\n"); + return; + } + + /* Disable IRQs */ + local_irq_save(flags); + + /* read state */ + value = inb(pmbase + 0x50); + + dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + /* write new state */ + value &= 0xFE; + value |= state; + + dprintk(KERN_DEBUG "cpufreq: writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); + + /* Disable bus master arbitration */ + pm2_blk = inb(pmbase + 0x20); + pm2_blk |= 0x01; + outb(pm2_blk, (pmbase + 0x20)); + + /* Actual transition */ + outb(value, (pmbase + 0x50)); + + /* Restore bus master arbitration */ + pm2_blk &= 0xfe; + outb(pm2_blk, (pmbase + 0x20)); + + /* check if transition was sucessful */ + value = inb(pmbase + 0x50); + + /* Enable IRQs */ + local_irq_restore(flags); + + dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + if (state == (value & 0x1)) { + dprintk (KERN_INFO "cpufreq: change to %u MHz succeded\n", (freqs.new / 1000)); + } else { + printk (KERN_ERR "cpufreq: change failed - I/O error\n"); + } + break; + default: + printk (KERN_ERR "cpufreq: setting CPU frequency on this chipset unsupported.\n"); + } + + if (notify) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return; +} + + +/** + * speedstep_activate - activate SpeedStep control in the chipset + * + * Tries to activate the SpeedStep status and control registers. + * Returns -EINVAL on an unsupported chipset, and zero on success. 
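For illustration only (not part of the patch): in the ICH2-M/ICH3-M paths above the driver reads the PM base register at PCI config offset 0x40, requires bit 0 (the I/O-space indicator) to be set, masks it off, and then takes bit 0 of the byte at PMBASE + 0x50 as the current SpeedStep state. A small model of just that decoding; the register values are invented, and a real driver would use pci_read_config_dword() and inb():

#include <stdio.h>

/* Decode a PM base-address register value the way the driver does:
 * bit 0 set  -> the range is I/O space, remaining bits are the base;
 * bit 0 zero -> unusable for this purpose. */
static int decode_pmbase(unsigned int reg, unsigned int *pmbase)
{
        if (!(reg & 0x01))
                return -1;              /* not an I/O range */
        *pmbase = reg & 0xFFFFFFFE;     /* strip the indicator bit */
        return *pmbase ? 0 : -1;
}

int main(void)
{
        unsigned int reg = 0x00001001;  /* invented config-space value            */
        unsigned char port_50 = 0x02;   /* pretend inb(pmbase + 0x50) returned it */
        unsigned int pmbase;

        if (decode_pmbase(reg, &pmbase) == 0)
                printf("pmbase 0x%x, state %u\n", pmbase, port_50 & 0x01);
        return 0;
}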
+ */ +static int speedstep_activate (void) +{ + if (!speedstep_chipset_dev) + return -EINVAL; + + switch (speedstep_chipset) { + case SPEEDSTEP_CHIPSET_ICH2M: + case SPEEDSTEP_CHIPSET_ICH3M: + { + u16 value = 0; + + pci_read_config_word(speedstep_chipset_dev, + 0x00A0, &value); + if (!(value & 0x08)) { + value |= 0x08; + dprintk(KERN_DEBUG "cpufreq: activating SpeedStep (TM) registers\n"); + pci_write_config_word(speedstep_chipset_dev, + 0x00A0, value); + } + + return 0; + } + } + + printk (KERN_ERR "cpufreq: SpeedStep (TM) on this chipset unsupported.\n"); + return -EINVAL; +} + + +/** + * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic + * + * Detects PIIX4, ICH2-M and ICH3-M so far. The pci_dev points to + * the LPC bridge / PM module which contains all power-management + * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected + * chipset, or zero on failure. + */ +static unsigned int speedstep_detect_chipset (void) +{ + speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801CA_12, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) + return SPEEDSTEP_CHIPSET_ICH3M; + + + speedstep_chipset_dev = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801BA_10, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) { + /* speedstep.c causes lockups on Dell Inspirons 8000 and + * 8100 which use a pretty old revision of the 82815 + * host brige. Abort on these systems. + */ + static struct pci_dev *hostbridge; + u8 rev = 0; + + hostbridge = pci_find_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82815_MC, + PCI_ANY_ID, + PCI_ANY_ID, + NULL); + + if (!hostbridge) + return SPEEDSTEP_CHIPSET_ICH2M; + + pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev); + if (rev < 5) { + dprintk(KERN_INFO "cpufreq: hostbrige does not support speedstep\n"); + speedstep_chipset_dev = NULL; + return 0; + } + + return SPEEDSTEP_CHIPSET_ICH2M; + } + + return 0; +} + + + +/********************************************************************* + * LOW LEVEL PROCESSOR INTERFACE * + *********************************************************************/ + + +/** + * pentium3_get_frequency - get the core frequencies for PIIIs + * + * Returns the core frequency of a Pentium III processor (in kHz) + */ +static unsigned int pentium3_get_frequency (void) +{ + /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ + struct { + unsigned int ratio; /* Frequency Multiplier (x10) */ + u8 bitmap; /* power on configuration bits + [27, 25:22] (in MSR 0x2a) */ + } msr_decode_mult [] = { + { 30, 0x01 }, + { 35, 0x05 }, + { 40, 0x02 }, + { 45, 0x06 }, + { 50, 0x00 }, + { 55, 0x04 }, + { 60, 0x0b }, + { 65, 0x0f }, + { 70, 0x09 }, + { 75, 0x0d }, + { 80, 0x0a }, + { 85, 0x26 }, + { 90, 0x20 }, + { 100, 0x2b }, + { 0, 0xff } /* error or unknown value */ + }; + /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ + struct { + unsigned int value; /* Front Side Bus speed in MHz */ + u8 bitmap; /* power on configuration bits [18: 19] + (in MSR 0x2a) */ + } msr_decode_fsb [] = { + { 66, 0x0 }, + { 100, 0x2 }, + { 133, 0x1 }, + { 0, 0xff} + }; + u32 msr_lo, msr_tmp; + int i = 0, j = 0; + struct cpuinfo_x86 *c = cpu_data; + + /* read MSR 0x2a - we only need the low 32 bits */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); + dprintk(KERN_DEBUG "cpufreq: P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + msr_tmp = msr_lo; + + /* decode the FSB */ + msr_tmp &= 0x00c0000; + msr_tmp >>= 18; + while 
(msr_tmp != msr_decode_fsb[i].bitmap) { + if (msr_decode_fsb[i].bitmap == 0xff) + return -EINVAL; + i++; + } + + /* decode the multiplier */ + if ((c->x86_model == 0x08) && (c->x86_mask == 0x01)) + /* different on early Coppermine PIII */ + msr_lo &= 0x03c00000; + else + msr_lo &= 0x0bc00000; + msr_lo >>= 22; + while (msr_lo != msr_decode_mult[j].bitmap) { + if (msr_decode_mult[j].bitmap == 0xff) + return -EINVAL; + j++; + } + + return (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100); +} + + +/** + * pentium4_get_frequency - get the core frequency for P4-Ms + * + * Should return the core frequency (in kHz) for P4-Ms. + */ +static unsigned int pentium4_get_frequency(void) +{ + u32 msr_lo, msr_hi; + + rdmsr(0x2c, msr_lo, msr_hi); + + dprintk(KERN_DEBUG "cpufreq: P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + + /* First 12 bits seem to change a lot (0x511, 0x410 and 0x30f seen + * yet). Next 12 bits always seem to be 0x300. If this is not true + * on this CPU, complain. Last 8 bits are frequency (in 100MHz). + */ + if (msr_hi || ((msr_lo & 0x00FFF000) != 0x300000)) { + printk(KERN_DEBUG "cpufreq: P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + printk(KERN_INFO "cpufreq: problem in initialization. Please contact Dominik Brodowski\n"); + printk(KERN_INFO "cpufreq: and attach this dmesg. Thanks in advance\n"); + return 0; + } + + msr_lo >>= 24; + return (msr_lo * 100000); +} + + +/** + * speedstep_detect_processor - detect Intel SpeedStep-capable processors. + * + * Returns the SPEEDSTEP_PROCESSOR_-number for the detected processor, + * or zero on failure. + */ +static unsigned int speedstep_detect_processor (void) +{ + struct cpuinfo_x86 *c = cpu_data; + u32 ebx; + + if ((c->x86_vendor != X86_VENDOR_INTEL) || + ((c->x86 != 6) && (c->x86 != 0xF))) + return 0; + + if (c->x86 == 0xF) { + /* Intel Pentium 4 Mobile P4-M */ + if (c->x86_model != 2) + return 0; + + if ((c->x86_mask != 4) && (c->x86_mask != 7)) + return 0; + + ebx = cpuid_ebx(0x00000001); + ebx &= 0x000000FF; + if ((ebx != 0x0e) && (ebx != 0x0f)) + return 0; + + return SPEEDSTEP_PROCESSOR_P4M; + } + + switch (c->x86_model) { + case 0x0B: /* Intel PIII [Tualatin] */ + /* cpuid_ebx(1) is 0x04 for desktop PIII, + 0x06 for mobile PIII-M */ + ebx = cpuid_ebx(0x00000001); + + ebx &= 0x000000FF; + if (ebx != 0x06) + return 0; + + /* So far all PIII-M processors support SpeedStep. See + * Intel's 24540633.pdf of August 2002 + */ + + return SPEEDSTEP_PROCESSOR_PIII_T; + + case 0x08: /* Intel PIII [Coppermine] */ + { + u32 msr_lo, msr_hi; + + /* all mobile PIII Coppermines have FSB 100 MHz + * ==> sort out a few desktop PIIIs. */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); + dprintk(KERN_DEBUG "cpufreq: Coppermine: MSR_IA32_EBL_Cr_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); + msr_lo &= 0x00c0000; + if (msr_lo != 0x0080000) + return 0; + + if (speedstep_coppermine) + return SPEEDSTEP_PROCESSOR_PIII_C; + + printk(KERN_INFO "cpufreq: in case this is a SpeedStep-capable Intel Pentium III Coppermine\n"); + printk(KERN_INFO "cpufreq: processor, please pass the boot option or module parameter\n"); + printk(KERN_INFO "cpufreq: `speedstep_coppermine=1` to the kernel. Thanks!\n"); + return 0; + } + + default: + return 0; + } +} + + + +/********************************************************************* + * HIGH LEVEL FUNCTIONS * + *********************************************************************/ + +/** + * speedstep_detect_speeds - detects low and high CPU frequencies. 
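For illustration only (not part of the patch): pentium3_get_frequency() above isolates bits 19:18 of MSR 0x2a and walks a lookup table until the bitmap matches or the 0xff sentinel is reached, then decodes the multiplier field the same way from bits 27 and 25:22. A stand-alone model of the FSB half of that decode; the MSR value is invented and a real driver would obtain it with rdmsr():

#include <stdio.h>

struct fsb_entry {
        unsigned int mhz;       /* front-side bus speed              */
        unsigned char bitmap;   /* power-on configuration bits 19:18 */
};

static const struct fsb_entry fsb_table[] = {
        {  66, 0x0 },
        { 100, 0x2 },
        { 133, 0x1 },
        {   0, 0xff },          /* sentinel: unknown encoding */
};

static int decode_fsb(unsigned int msr_lo)
{
        unsigned int bits = (msr_lo & 0x000c0000) >> 18;
        int i = 0;

        while (fsb_table[i].bitmap != bits) {
                if (fsb_table[i].bitmap == 0xff)
                        return -1;      /* encoding not in the table */
                i++;
        }
        return fsb_table[i].mhz;
}

int main(void)
{
        unsigned int msr_lo = 0x00080000;       /* invented: bits 19:18 = 0x2 */

        printf("FSB: %d MHz\n", decode_fsb(msr_lo));    /* prints 100 */
        return 0;
}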
+ * + * Detects the low and high CPU frequencies in kHz. Returns 0 on + * success or -EINVAL / -EIO on problems. + */ +static int speedstep_detect_speeds (void) +{ + unsigned long flags; + unsigned int state; + int i, result; + + /* Disable irqs for entire detection process */ + local_irq_save(flags); + + for (i=0; i<2; i++) { + /* read the current state */ + result = speedstep_get_state(&state); + if (result) + return result; + + /* save the correct value, and switch to other */ + if (state == SPEEDSTEP_LOW) { + switch (speedstep_processor) { + case SPEEDSTEP_PROCESSOR_PIII_C: + case SPEEDSTEP_PROCESSOR_PIII_T: + speedstep_low_freq = pentium3_get_frequency(); + break; + case SPEEDSTEP_PROCESSOR_P4M: + speedstep_low_freq = pentium4_get_frequency(); + } + speedstep_set_state(SPEEDSTEP_HIGH, 0); + } else { + switch (speedstep_processor) { + case SPEEDSTEP_PROCESSOR_PIII_C: + case SPEEDSTEP_PROCESSOR_PIII_T: + speedstep_high_freq = pentium3_get_frequency(); + break; + case SPEEDSTEP_PROCESSOR_P4M: + speedstep_high_freq = pentium4_get_frequency(); + } + speedstep_set_state(SPEEDSTEP_LOW, 0); + } + } + + local_irq_restore(flags); + + if (!speedstep_low_freq || !speedstep_high_freq || + (speedstep_low_freq == speedstep_high_freq)) + return -EIO; + + return 0; +} + + +/** + * speedstep_setpolicy - set a new CPUFreq policy + * @policy: new policy + * + * Sets a new CPUFreq policy. + */ +static int speedstep_setpolicy (struct cpufreq_policy *policy) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_setpolicy(policy, &speedstep_freqs[0], &newstate)) + return -EINVAL; + + speedstep_set_state(newstate, 1); + + return 0; +} + + +/** + * speedstep_verify - verifies a new CPUFreq policy + * @freq: new policy + * + * Limit must be within speedstep_low_freq and speedstep_high_freq, with + * at least one border included. + */ +static int speedstep_verify (struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); +} + + +#ifndef MODULE +/** + * speedstep_setup speedstep command line parameter parsing + * + * speedstep command line parameter. Use: + * speedstep_coppermine=1 + * if the CPU in your notebook is a SpeedStep-capable Intel + * Pentium III Coppermine. These processors cannot be detected + * automatically, as Intel continues to consider the detection + * alogrithm as proprietary material. + */ +static int __init speedstep_setup(char *str) +{ + speedstep_coppermine = simple_strtoul(str, &str, 0); + return 1; +} +__setup("speedstep_coppermine=", speedstep_setup); +#endif + +/** + * speedstep_init - initializes the SpeedStep CPUFreq driver + * + * Initializes the SpeedStep support. Returns -ENODEV on unsupported + * devices, -EINVAL on problems during initiatization, and zero on + * success. + */ +static int __init speedstep_init(void) +{ + int result; + unsigned int speed; + struct cpufreq_driver *driver; + + + /* detect chipset */ + speedstep_chipset = speedstep_detect_chipset(); + + /* detect chipset */ + if (speedstep_chipset) + speedstep_processor = speedstep_detect_processor(); + + if ((!speedstep_chipset) || (!speedstep_processor)) { + printk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) for this %s not (yet) available.\n", speedstep_chipset ? 
"processor" : "chipset"); + return -ENODEV; + } + + dprintk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) support $Revision: 1.64 $\n"); + dprintk(KERN_DEBUG "cpufreq: chipset 0x%x - processor 0x%x\n", + speedstep_chipset, speedstep_processor); + + /* activate speedstep support */ + result = speedstep_activate(); + if (result) + return result; + + /* detect low and high frequency */ + result = speedstep_detect_speeds(); + if (result) + return result; + + /* get current speed setting */ + result = speedstep_get_state(&speed); + if (result) + return result; + + speed = (speed == SPEEDSTEP_LOW) ? speedstep_low_freq : speedstep_high_freq; + + dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n", + (speed == speedstep_low_freq) ? "low" : "high", + (speed / 1000)); + + /* initialization of main "cpufreq" code*/ + driver = kmalloc(sizeof(struct cpufreq_driver) + + NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->policy = (struct cpufreq_policy *) (driver + 1); + + driver->policy[0].cpu = 0; + result = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &speedstep_freqs[0]); + if (result) { + kfree(driver); + return result; + } + +#ifdef CONFIG_CPU_FREQ_24_API + driver->cpu_cur_freq[0] = speed; +#endif + + driver->verify = &speedstep_verify; + driver->setpolicy = &speedstep_setpolicy; + + driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + driver->policy[0].policy = (speed == speedstep_low_freq) ? + CPUFREQ_POLICY_POWERSAVE : CPUFREQ_POLICY_PERFORMANCE; + + speedstep_driver = driver; + + result = cpufreq_register(driver); + if (result) { + speedstep_driver = NULL; + kfree(driver); + } + + return result; +} + + +/** + * speedstep_exit - unregisters SpeedStep support + * + * Unregisters SpeedStep support. 
+ */ +static void __exit speedstep_exit(void) +{ + if (speedstep_driver) { + cpufreq_unregister(); + kfree(speedstep_driver); + } +} + + +MODULE_AUTHOR ("Dave Jones , Dominik Brodowski "); +MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors."); +MODULE_LICENSE ("GPL"); +module_init(speedstep_init); +module_exit(speedstep_exit); + +MODULE_PARM (speedstep_coppermine, "i"); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/time.c linux.21pre4-ac2/arch/i386/kernel/time.c --- linux.21pre4/arch/i386/kernel/time.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/time.c 2003-01-29 17:12:12.000000000 +0000 @@ -42,6 +42,8 @@ #include #include #include +#include +#include #include #include @@ -833,6 +835,50 @@ return 0; } +#ifdef CONFIG_CPU_FREQ +static unsigned int ref_freq = 0; +static unsigned long loops_per_jiffy_ref = 0; + +#ifndef CONFIG_SMP +static unsigned long fast_gettimeoffset_ref = 0; +static unsigned long cpu_khz_ref = 0; +#endif + +static int +time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + + if (!ref_freq) { + ref_freq = freq->old; + loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; +#ifndef CONFIG_SMP + fast_gettimeoffset_ref = fast_gettimeoffset_quotient; + cpu_khz_ref = cpu_khz; +#endif + } + + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new); +#ifndef CONFIG_SMP + if (use_tsc) { + fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); + cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new); + } +#endif + } + + return 0; +} + +static struct notifier_block time_cpufreq_notifier_block = { + .notifier_call = time_cpufreq_notifier +}; +#endif + + void __init time_init(void) { extern int x86_udelay_tsc; @@ -904,6 +950,9 @@ } } +#ifdef CONFIG_CPU_FREQ + cpufreq_register_notifier(&time_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); +#endif #ifdef CONFIG_VISWS printk("Starting Cobalt Timer system clock\n"); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/kernel/traps.c linux.21pre4-ac2/arch/i386/kernel/traps.c --- linux.21pre4/arch/i386/kernel/traps.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/kernel/traps.c 2003-01-06 15:42:33.000000000 +0000 @@ -284,6 +284,20 @@ void die(const char * str, struct pt_regs * regs, long err) { +#ifdef CONFIG_PNPBIOS + if (regs->xcs == 0x60 || regs->xcs == 0x68) + { + extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; + extern u32 pnp_bios_is_utter_crap; + pnp_bios_is_utter_crap = 1; + printk(KERN_CRIT "PNPBIOS fault.. 
attempting recovery.\n"); + __asm__ volatile( + "movl %0, %%esp\n\t" + "jmp %1\n\t" + : "=a" (pnp_bios_fault_esp), "=b" (pnp_bios_fault_eip)); + panic("do_trap: can't hit this"); + } +#endif console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/lib/delay.c linux.21pre4-ac2/arch/i386/lib/delay.c --- linux.21pre4/arch/i386/lib/delay.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/lib/delay.c 2003-01-12 01:03:08.000000000 +0000 @@ -81,3 +81,8 @@ { __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */ } + +void __ndelay(unsigned long usecs) +{ + __const_udelay(usecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/math-emu/fpu_system.h linux.21pre4-ac2/arch/i386/math-emu/fpu_system.h --- linux.21pre4/arch/i386/math-emu/fpu_system.h 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/math-emu/fpu_system.h 2003-01-06 19:12:50.000000000 +0000 @@ -20,7 +20,7 @@ of the stack frame of math_emulate() */ #define SETUP_DATA_AREA(arg) FPU_info = (struct info *) &arg -#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.segments)[(s) >> 3]) +#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) #define SEG_D_SIZE(x) ((x).b & (3 << 21)) #define SEG_G_BIT(x) ((x).b & (1 << 23)) #define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/mm/fault.c linux.21pre4-ac2/arch/i386/mm/fault.c --- linux.21pre4/arch/i386/mm/fault.c 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/mm/fault.c 2003-01-06 19:10:27.000000000 +0000 @@ -76,9 +76,7 @@ return 1; check_stack: - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, start) == 0) + if (!expand_stack(vma, start)) goto good_area; bad_area: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/i386/mm/init.c linux.21pre4-ac2/arch/i386/mm/init.c --- linux.21pre4/arch/i386/mm/init.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/i386/mm/init.c 2003-01-08 15:27:29.000000000 +0000 @@ -510,7 +510,15 @@ if (!mem_map) BUG(); - +#ifdef CONFIG_HIGHMEM + /* check that fixmap and pkmap do not overlap */ + if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { + printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n"); + printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n", + PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START); + BUG(); + } +#endif set_max_mapnr_init(); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ia64/ia32/sys_ia32.c linux.21pre4-ac2/arch/ia64/ia32/sys_ia32.c --- linux.21pre4/arch/ia64/ia32/sys_ia32.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/ia64/ia32/sys_ia32.c 2003-01-06 23:19:29.000000000 +0000 @@ -3782,7 +3782,11 @@ return result; } -struct dqblk32 { +extern asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr); + +#ifdef CONFIG_QIFACE_COMPAT +#ifdef CONFIG_QIFACE_V1 +struct user_dqblk32 { __u32 dqb_bhardlimit; __u32 dqb_bsoftlimit; __u32 dqb_curblocks; @@ -3792,49 +3796,82 @@ __kernel_time_t32 dqb_btime; __kernel_time_t32 dqb_itime; }; +typedef struct v1c_mem_dqblk comp_dqblk_t; -asmlinkage long -sys32_quotactl (int cmd, unsigned int special, int id, 
struct dqblk32 *addr) +#define Q_COMP_GETQUOTA Q_V1_GETQUOTA +#define Q_COMP_SETQUOTA Q_V1_SETQUOTA +#define Q_COMP_SETQLIM Q_V1_SETQLIM +#define Q_COMP_SETUSE Q_V1_SETUSE +#else +struct user_dqblk32 { + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u64 dqb_curspace; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v2c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V2_GETQUOTA +#define Q_COMP_SETQUOTA Q_V2_SETQUOTA +#define Q_COMP_SETQLIM Q_V2_SETQLIM +#define Q_COMP_SETUSE Q_V2_SETUSE +#endif + +asmlinkage long sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) { - extern asmlinkage long sys_quotactl (int, const char *, int, caddr_t); int cmds = cmd >> SUBCMDSHIFT; + long err; + comp_dqblk_t d; mm_segment_t old_fs; - struct dqblk d; char *spec; - long err; - + switch (cmds) { - case Q_GETQUOTA: - break; - case Q_SETQUOTA: - case Q_SETUSE: - case Q_SETQLIM: - if (copy_from_user (&d, addr, sizeof(struct dqblk32))) - return -EFAULT; - d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; - d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; - break; - default: - return sys_quotactl(cmd, (void *) A(special), id, (caddr_t) addr); + case Q_COMP_GETQUOTA: + break; + case Q_COMP_SETQUOTA: + case Q_COMP_SETUSE: + case Q_COMP_SETQLIM: + if (copy_from_user(&d, (struct user_dqblk32 *)addr, + sizeof (struct user_dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime; + break; + default: + return sys_quotactl(cmd, special, id, (__kernel_caddr_t)addr); } - spec = getname32((void *) A(special)); + spec = getname (special); err = PTR_ERR(spec); - if (IS_ERR(spec)) + if (IS_ERR(spec)) return err; + old_fs = get_fs(); + set_fs (KERNEL_DS); + err = sys_quotactl(cmd, (const char *)spec, id, (__kernel_caddr_t)&d); + set_fs (old_fs); + putname (spec); + if (err) return err; - old_fs = get_fs (); - set_fs(KERNEL_DS); - err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); - set_fs(old_fs); - putname(spec); - if (cmds == Q_GETQUOTA) { + if (cmds == Q_COMP_GETQUOTA) { __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; - ((struct dqblk32 *)&d)->dqb_itime = i; - ((struct dqblk32 *)&d)->dqb_btime = b; - if (copy_to_user(addr, &d, sizeof(struct dqblk32))) + ((struct user_dqblk32 *)&d)->dqb_itime = i; + ((struct user_dqblk32 *)&d)->dqb_btime = b; + if (copy_to_user ((struct user_dqblk32 *)addr, &d, + sizeof (struct user_dqblk32))) return -EFAULT; } - return err; + return 0; +} + +#else +/* No conversion needed for new interface */ +asmlinkage long sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) +{ + return sys_quotactl(cmd, special, id, addr); } +#endif asmlinkage long sys32_sched_rr_get_interval (pid_t pid, struct timespec32 *interval) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ia64/mm/fault.c linux.21pre4-ac2/arch/ia64/mm/fault.c --- linux.21pre4/arch/ia64/mm/fault.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/ia64/mm/fault.c 2003-01-06 19:13:14.000000000 +0000 @@ -127,8 +127,6 @@ check_expansion: if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (rgn_index(address) != rgn_index(vma->vm_start) || rgn_offset(address) >= RGN_MAP_LIMIT) goto bad_area; diff -u --exclude-from /usr/src/exclude --new-file --recursive 
linux.21pre4/arch/m68k/config.in linux.21pre4-ac2/arch/m68k/config.in --- linux.21pre4/arch/m68k/config.in 2003-01-29 16:27:11.000000000 +0000 +++ linux.21pre4-ac2/arch/m68k/config.in 2003-01-28 16:35:25.000000000 +0000 @@ -517,9 +517,12 @@ if [ "$CONFIG_SUN3" = "y" ]; then define_bool CONFIG_GEN_RTC y else - bool 'Generic /dev/rtc emulation' CONFIG_GEN_RTC + tristate 'Generic /dev/rtc emulation' CONFIG_GEN_RTC fi fi +if [ "$CONFIG_GEN_RTC" != "n" ]; then + bool ' Extended RTC operation' CONFIG_GEN_RTC_X +fi bool 'Unix98 PTY support' CONFIG_UNIX98_PTYS if [ "$CONFIG_UNIX98_PTYS" = "y" ]; then int ' Maximum number of Unix98 PTYs in use (0-2048)' CONFIG_UNIX98_PTY_COUNT 256 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/m68k/kernel/m68k_ksyms.c linux.21pre4-ac2/arch/m68k/kernel/m68k_ksyms.c --- linux.21pre4/arch/m68k/kernel/m68k_ksyms.c 2003-01-29 16:27:11.000000000 +0000 +++ linux.21pre4-ac2/arch/m68k/kernel/m68k_ksyms.c 2003-01-28 16:35:25.000000000 +0000 @@ -18,6 +18,7 @@ #include #include #include +#include asmlinkage long long __ashldi3 (long long, int); asmlinkage long long __ashrdi3 (long long, int); @@ -49,6 +50,10 @@ EXPORT_SYMBOL(kernel_set_cachemode); #endif /* !CONFIG_SUN3 */ EXPORT_SYMBOL(m68k_debug_device); +EXPORT_SYMBOL(mach_hwclk); +EXPORT_SYMBOL(mach_get_ss); +EXPORT_SYMBOL(mach_get_rtc_pll); +EXPORT_SYMBOL(mach_set_rtc_pll); EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(dump_thread); EXPORT_SYMBOL(strnlen); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/m68k/kernel/setup.c linux.21pre4-ac2/arch/m68k/kernel/setup.c --- linux.21pre4/arch/m68k/kernel/setup.c 2003-01-29 16:27:11.000000000 +0000 +++ linux.21pre4-ac2/arch/m68k/kernel/setup.c 2003-01-28 16:35:25.000000000 +0000 @@ -90,6 +90,9 @@ void (*mach_gettod) (int*, int*, int*, int*, int*, int*); int (*mach_hwclk) (int, struct rtc_time*) = NULL; int (*mach_set_clock_mmss) (unsigned long) = NULL; +unsigned int (*mach_get_ss)(void) = NULL; +int (*mach_get_rtc_pll)(struct rtc_pll_info *) = NULL; +int (*mach_set_rtc_pll)(struct rtc_pll_info *) = NULL; void (*mach_reset)( void ); void (*mach_halt)( void ) = NULL; void (*mach_power_off)( void ) = NULL; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/m68k/q40/config.c linux.21pre4-ac2/arch/m68k/q40/config.c --- linux.21pre4/arch/m68k/q40/config.c 2003-01-29 16:27:14.000000000 +0000 +++ linux.21pre4-ac2/arch/m68k/q40/config.c 2003-01-28 16:35:25.000000000 +0000 @@ -58,7 +58,10 @@ extern void q40_gettod (int *year, int *mon, int *day, int *hour, int *min, int *sec); extern int q40_hwclk (int, struct rtc_time *); +extern unsigned int q40_get_ss (void); extern int q40_set_clock_mmss (unsigned long); +static int q40_get_rtc_pll(struct rtc_pll_info *pll); +static int q40_set_rtc_pll(struct rtc_pll_info *pll); extern void q40_reset (void); void q40_halt(void); extern void q40_waitbut(void); @@ -196,6 +199,9 @@ mach_gettimeoffset = q40_gettimeoffset; mach_gettod = q40_gettod; mach_hwclk = q40_hwclk; + mach_get_ss = q40_get_ss; + mach_get_rtc_pll = q40_get_rtc_pll; + mach_set_rtc_pll = q40_set_rtc_pll; mach_set_clock_mmss = q40_set_clock_mmss; mach_reset = q40_reset; @@ -331,6 +337,11 @@ return 0; } +unsigned int q40_get_ss() +{ + return bcd2bin(Q40_RTC_SECS); +} + /* * Set the minutes and seconds from seconds value 'nowtime'. Fail if * clock is out by > 30 minutes. Logic lifted from atari code. 
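For illustration only (not part of the patch): q40_get_ss() above returns bcd2bin(Q40_RTC_SECS) because the Q40 RTC keeps its seconds register in binary-coded decimal, and the q40_get_rtc_pll() code in the hunk that follows decodes a calibration value stored as a 5-bit magnitude plus a sign bit. Small models of both conversions, using invented register values:

#include <stdio.h>

/* Each nibble of a BCD byte holds one decimal digit. */
static unsigned int bcd2bin_model(unsigned char bcd)
{
        return (bcd & 0x0f) + (bcd >> 4) * 10;
}

/* 5-bit magnitude plus a sign bit, as used for the RTC PLL
 * calibration value in the following hunk. */
static int pll_decode_model(unsigned char ctrl)
{
        int value = ctrl & 0x1f;        /* (1 << 5) - 1 */

        if (ctrl & 0x20)                /* 1 << 5 */
                value = -value;
        return value;
}

int main(void)
{
        printf("%u seconds\n", bcd2bin_model(0x47));    /* invented value: 47 */
        printf("pll %d\n", pll_decode_model(0x23));     /* invented value: -3 */
        return 0;
}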
@@ -362,3 +373,33 @@ return retval; } + +/* get and set PLL calibration of RTC clock */ +#define Q40_RTC_PLL_MASK ((1<<5)-1) +#define Q40_RTC_PLL_SIGN (1<<5) + +static int q40_get_rtc_pll(struct rtc_pll_info *pll) +{ + int tmp=Q40_RTC_CTRL; + pll->pll_value = tmp & Q40_RTC_PLL_MASK; + if (tmp & Q40_RTC_PLL_SIGN) + pll->pll_value = -pll->pll_value; + pll->pll_max=31; + pll->pll_min=-31; + pll->pll_posmult=512; + pll->pll_negmult=256; + pll->pll_clock=125829120; + return 0; + } + +static int q40_set_rtc_pll(struct rtc_pll_info *pll) +{ + if (!pll->pll_ctrl){ + /* the docs are a bit unclear so I am doublesetting RTC_WRITE here ... */ + int tmp=(pll->pll_value & 31) | (pll->pll_value<0 ? 32 : 0) | Q40_RTC_WRITE; + Q40_RTC_CTRL |= Q40_RTC_WRITE; + Q40_RTC_CTRL = tmp; + Q40_RTC_CTRL &= ~(Q40_RTC_WRITE); + return 0; + } else return -EINVAL; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/mips/mm/fault.c linux.21pre4-ac2/arch/mips/mm/fault.c --- linux.21pre4/arch/mips/mm/fault.c 2003-01-29 16:27:06.000000000 +0000 +++ linux.21pre4-ac2/arch/mips/mm/fault.c 2003-01-06 19:13:14.000000000 +0000 @@ -111,8 +111,6 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/mips64/mm/fault.c linux.21pre4-ac2/arch/mips64/mm/fault.c --- linux.21pre4/arch/mips64/mm/fault.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/mips64/mm/fault.c 2003-01-06 19:13:14.000000000 +0000 @@ -135,8 +135,6 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/config.in linux.21pre4-ac2/arch/parisc/config.in --- linux.21pre4/arch/parisc/config.in 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/config.in 2003-01-06 15:38:40.000000000 +0000 @@ -47,9 +47,6 @@ bool 'Symmetric multi-processing support' CONFIG_SMP bool 'Chassis LCD and LED support' CONFIG_CHASSIS_LCD_LED -bool 'Kernel Debugger support' CONFIG_KWDB -# define_bool CONFIG_KWDB n - bool 'U2/Uturn I/O MMU' CONFIG_IOMMU_CCIO bool 'VSC/GSC/HSC bus support' CONFIG_GSC dep_bool ' Lasi I/O support' CONFIG_GSC_LASI $CONFIG_GSC @@ -194,6 +191,7 @@ #bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ +bool 'Debug spinlocks' CONFIG_DEBUG_SPINLOCK endmenu source lib/Config.in diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/defconfig linux.21pre4-ac2/arch/parisc/defconfig --- linux.21pre4/arch/parisc/defconfig 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/defconfig 2003-01-06 15:38:40.000000000 +0000 @@ -32,7 +32,6 @@ # # CONFIG_SMP is not set CONFIG_CHASSIS_LCD_LED=y -# CONFIG_KWDB is not set CONFIG_IOMMU_CCIO=y CONFIG_GSC=y CONFIG_GSC_LASI=y @@ -87,7 +86,9 @@ # CONFIG_PARIDE is not set # CONFIG_BLK_CPQ_DA is not set # CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_CISS_SCSI_TAPE is not set # CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set CONFIG_BLK_DEV_LOOP=y # CONFIG_BLK_DEV_NBD is not set CONFIG_BLK_DEV_RAM=y @@ -138,6 +139,11 @@ # # CONFIG_IPX is not set # CONFIG_ATALK is not set + +# +# Appletalk devices +# +# CONFIG_DEV_APPLETALK is not set # CONFIG_DECNET is not set # CONFIG_BRIDGE is not set # 
CONFIG_X25 is not set @@ -155,6 +161,11 @@ # CONFIG_NET_SCHED is not set # +# Network testing +# +# CONFIG_NET_PKTGEN is not set + +# # ATA/IDE/MFM/RLL support # CONFIG_IDE=y @@ -171,6 +182,7 @@ # CONFIG_BLK_DEV_HD is not set # CONFIG_BLK_DEV_IDEDISK is not set # CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set # CONFIG_BLK_DEV_IDEDISK_VENDOR is not set # CONFIG_BLK_DEV_IDEDISK_FUJITSU is not set # CONFIG_BLK_DEV_IDEDISK_IBM is not set @@ -185,6 +197,7 @@ # CONFIG_BLK_DEV_IDETAPE is not set # CONFIG_BLK_DEV_IDEFLOPPY is not set # CONFIG_BLK_DEV_IDESCSI is not set +# CONFIG_IDE_TASK_IOCTL is not set # # IDE chipset support/bugfixes @@ -196,12 +209,15 @@ CONFIG_BLK_DEV_IDEPCI=y # CONFIG_IDEPCI_SHARE_IRQ is not set CONFIG_BLK_DEV_IDEDMA_PCI=y -CONFIG_BLK_DEV_ADMA=y # CONFIG_BLK_DEV_OFFBOARD is not set +# CONFIG_BLK_DEV_IDEDMA_FORCED is not set # CONFIG_IDEDMA_PCI_AUTO is not set +# CONFIG_IDEDMA_ONLYDISK is not set CONFIG_BLK_DEV_IDEDMA=y # CONFIG_IDEDMA_PCI_WIP is not set +# CONFIG_BLK_DEV_IDEDMA_TIMEOUT is not set # CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set +CONFIG_BLK_DEV_ADMA=y # CONFIG_BLK_DEV_AEC62XX is not set # CONFIG_AEC62XX_TUNING is not set # CONFIG_BLK_DEV_ALI15X3 is not set @@ -209,6 +225,7 @@ # CONFIG_BLK_DEV_AMD74XX is not set # CONFIG_AMD74XX_OVERRIDE is not set # CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_CMD680 is not set # CONFIG_BLK_DEV_CY82C693 is not set # CONFIG_BLK_DEV_CS5530 is not set # CONFIG_BLK_DEV_HPT34X is not set @@ -296,9 +313,13 @@ CONFIG_53C700_USE_CONSISTENT=y # CONFIG_SCSI_NCR53C7xx is not set # CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_NCR53C8XX is not set -CONFIG_SCSI_SYM53C8XX=y +# CONFIG_ASK_ZALON is not set +# CONFIG_ASK_NCR53C8XX is not set +# CONFIG_ASK_SYM53C8XX is not set CONFIG_SCSI_ZALON=y +CONFIG_ASK_ZALON=y +CONFIG_SCSI_SYM53C8XX=y +CONFIG_ASK_SYM53C8XX=y CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8 CONFIG_SCSI_NCR53C8XX_MAX_TAGS=32 CONFIG_SCSI_NCR53C8XX_SYNC=20 @@ -366,6 +387,7 @@ # CONFIG_APRICOT is not set # CONFIG_CS89x0 is not set CONFIG_TULIP=y +# CONFIG_TC35815 is not set # CONFIG_TULIP_MWI is not set # CONFIG_TULIP_MMIO is not set # CONFIG_DE4X5 is not set @@ -403,6 +425,7 @@ # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_PLIP is not set @@ -473,6 +496,7 @@ CONFIG_PSMOUSE=y # CONFIG_82C710_MOUSE is not set # CONFIG_PC110_PAD is not set +# CONFIG_MK712_MOUSE is not set # # Joysticks @@ -515,7 +539,6 @@ # # CONFIG_WATCHDOG is not set CONFIG_GENRTC=y -# CONFIG_INTEL_RNG is not set # CONFIG_NVRAM is not set # CONFIG_RTC is not set # CONFIG_DTLK is not set @@ -585,7 +608,7 @@ # CONFIG_JFFS2_FS is not set # CONFIG_CRAMFS is not set # CONFIG_TMPFS is not set -# CONFIG_RAMFS is not set +CONFIG_RAMFS=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y # CONFIG_ZISOFS is not set @@ -697,6 +720,7 @@ # CONFIG_FB_RIVA is not set # CONFIG_FB_CLGEN is not set # CONFIG_FB_PM2 is not set +# CONFIG_FB_PM3 is not set # CONFIG_FB_CYBER2000 is not set CONFIG_FB_STI=y # CONFIG_FB_MATROX is not set @@ -704,6 +728,7 @@ # CONFIG_FB_RADEON is not set # CONFIG_FB_ATY128 is not set # CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set # CONFIG_FB_3DFX is not set # CONFIG_FB_VOODOO1 is not set # CONFIG_FB_TRIDENT is not set @@ -759,106 +784,6 @@ # CONFIG_USB is not set # -# USB Controllers -# -# CONFIG_USB_UHCI is not set -# CONFIG_USB_UHCI_ALT is not set -# CONFIG_USB_OHCI is not set - -# -# USB Device Class 
drivers -# -# CONFIG_USB_AUDIO is not set -# CONFIG_USB_BLUETOOTH is not set -# CONFIG_USB_STORAGE is not set -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_DPCM is not set -# CONFIG_USB_STORAGE_HP8200e is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set - -# -# USB Human Interface Devices (HID) -# -# CONFIG_USB_HID is not set -# CONFIG_USB_HIDDEV is not set -# CONFIG_USB_KBD is not set -# CONFIG_USB_MOUSE is not set -# CONFIG_USB_WACOM is not set - -# -# USB Imaging devices -# -# CONFIG_USB_DC2XX is not set -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_SCANNER is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USB_HPUSBSCSI is not set - -# -# USB Multimedia devices -# - -# -# Video4Linux support is needed for USB Multimedia device support -# - -# -# USB Network adaptors -# -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_CATC is not set -# CONFIG_USB_CDCETHER is not set -# CONFIG_USB_USBNET is not set - -# -# USB port drivers -# -# CONFIG_USB_USS720 is not set - -# -# USB Serial Converter support -# -# CONFIG_USB_SERIAL is not set -# CONFIG_USB_SERIAL_GENERIC is not set -# CONFIG_USB_SERIAL_BELKIN is not set -# CONFIG_USB_SERIAL_WHITEHEAT is not set -# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set -# CONFIG_USB_SERIAL_EMPEG is not set -# CONFIG_USB_SERIAL_FTDI_SIO is not set -# CONFIG_USB_SERIAL_VISOR is not set -# CONFIG_USB_SERIAL_IPAQ is not set -# CONFIG_USB_SERIAL_IR is not set -# CONFIG_USB_SERIAL_EDGEPORT is not set -# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set -# CONFIG_USB_SERIAL_KEYSPAN is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set -# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set -# CONFIG_USB_SERIAL_MCT_U232 is not set -# CONFIG_USB_SERIAL_KLSI is not set -# CONFIG_USB_SERIAL_PL2303 is not set -# CONFIG_USB_SERIAL_CYBERJACK is not set -# CONFIG_USB_SERIAL_XIRCOM is not set -# CONFIG_USB_SERIAL_OMNINET is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_RIO500 is not set - -# # Kernel hacking # CONFIG_MAGIC_SYSRQ=y diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/ccio-dma.c linux.21pre4-ac2/arch/parisc/kernel/ccio-dma.c --- linux.21pre4/arch/parisc/kernel/ccio-dma.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/ccio-dma.c 2003-01-06 15:38:40.000000000 +0000 @@ -609,8 +609,6 @@ return 0; } - dev->dma_mask = mask; /* save it */ - /* only support 32-bit devices (ie PCI/GSC) */ return (int)(mask == 0xffffffffUL); } @@ -1441,7 +1439,7 @@ result = request_resource(&iomem_resource, res); if (result < 0) { printk(KERN_ERR - "%s: failed to claim CCIO bus address space (%p,%p)\n", + "%s: failed to claim CCIO bus address space (%lx,%lx)\n", __FILE__, res->start, res->end); } } @@ -1486,7 +1484,7 @@ int ccio_allocate_resource(const struct parisc_device *dev, struct resource *res, unsigned long size, unsigned long min, unsigned long max, unsigned long align, - void (*alignf)(void *, struct resource *, unsigned long), + void (*alignf)(void *, struct 
resource *, unsigned long, unsigned long), void *alignf_data) { struct ioc *ioc = ccio_get_iommu(dev); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/ccio-rm-dma.c linux.21pre4-ac2/arch/parisc/kernel/ccio-rm-dma.c --- linux.21pre4/arch/parisc/kernel/ccio-rm-dma.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/ccio-rm-dma.c 2003-01-06 15:38:40.000000000 +0000 @@ -74,8 +74,6 @@ return(0); } - dev->dma_mask = mask; /* save it */ - /* only support 32-bit devices (ie PCI/GSC) */ return((int) (mask >= 0xffffffffUL)); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/drivers.c linux.21pre4-ac2/arch/parisc/kernel/drivers.c --- linux.21pre4/arch/parisc/kernel/drivers.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/drivers.c 2003-01-06 15:38:40.000000000 +0000 @@ -461,6 +461,7 @@ #define BC_PORT_MASK 0x8 #define BC_LOWER_PORT 0x8 + #define BUS_CONVERTER(dev) \ ((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT)) @@ -473,14 +474,29 @@ __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_low) << 16 : \ __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_low)) -static void walk_native_bus(unsigned long addr, struct parisc_device *parent); +#define READ_IO_IO_HIGH(dev) \ + (dev->id.hw_type == HPHW_IOA ? \ + __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_high) << 16 : \ + __raw_readl((unsigned long)&((struct bc_module *)dev->hpa)->io_io_high)) + + +static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, + struct parisc_device *parent); + +#define FLEX_MASK (unsigned long)0xfffffffffffc0000 + + void walk_lower_bus(struct parisc_device *dev) { + unsigned long io_io_low, io_io_high; if(!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev)) return; - walk_native_bus((unsigned long)(signed int)READ_IO_IO_LOW(dev), dev); + io_io_low = ((unsigned long)(signed int)READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK; + io_io_high = ((unsigned long)(signed int)READ_IO_IO_HIGH(dev) + ~FLEX_MASK) & FLEX_MASK; + + walk_native_bus(io_io_low, io_io_high, dev); } #define MAX_NATIVE_DEVICES 64 @@ -488,7 +504,9 @@ /** * walk_native_bus -- Probe a bus for devices - * @addr: Base address of this bus. + * @io_io_low: Base address of this bus. + * @io_io_high: Last address of this bus. + * @parent: The parent bus device. * * A native bus (eg Runway or GSC) may have up to 64 devices on it, * spaced at intervals of 0x1000 bytes. PDC may not inform us of these @@ -496,28 +514,32 @@ * devices which are not physically connected (such as extra serial & * keyboard ports). This problem is not yet solved. */ -static void walk_native_bus(unsigned long addr, struct parisc_device *parent) +static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, + struct parisc_device *parent) { - int i; + int i, devices_found = 0; + unsigned long hpa = io_io_low; struct hardware_path path; get_node_path(parent, &path); - for (i = 0; i < MAX_NATIVE_DEVICES; i++) { - unsigned long hpa = (addr + i * NATIVE_DEVICE_OFFSET); - struct parisc_device *dev; - - /* Was the device already added by Firmware? */ - dev = find_device_by_addr(hpa); - if (!dev) { - path.mod = i; - dev = alloc_pa_dev(hpa, &path); - if (!dev) - continue; - - register_parisc_device(dev); + do { + for (i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) { + struct parisc_device *dev; + + /* Was the device already added by Firmware? 
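For illustration only (not part of the patch): the reworked walk_lower_bus() above rounds the bus converter's io_io_low/io_io_high values with (addr + ~FLEX_MASK) & FLEX_MASK, i.e. up to the next 0x40000-byte "flex" boundary, while already-aligned addresses are left unchanged. A quick stand-alone check of that idiom with invented addresses:

#include <stdio.h>

#define FLEX_MASK 0xfffffffffffc0000ULL   /* 256 KB granularity, as in the hunk */

static unsigned long long flex_round_up(unsigned long long addr)
{
        return (addr + ~FLEX_MASK) & FLEX_MASK;
}

int main(void)
{
        /* invented example addresses */
        printf("0x%llx -> 0x%llx\n", 0xfffffffff4085000ULL,
               flex_round_up(0xfffffffff4085000ULL));   /* rounds up to ...f40c0000   */
        printf("0x%llx -> 0x%llx\n", 0xfffffffff4000000ULL,
               flex_round_up(0xfffffffff4000000ULL));   /* already aligned: unchanged */
        return 0;
}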
*/ + dev = find_device_by_addr(hpa); + if (!dev) { + path.mod = i; + dev = alloc_pa_dev(hpa, &path); + if (!dev) + continue; + + register_parisc_device(dev); + devices_found++; + } + walk_lower_bus(dev); } - walk_lower_bus(dev); - } + } while (!devices_found && hpa < io_io_high); } #define CENTRAL_BUS_ADDR (unsigned long) 0xfffffffffff80000 @@ -530,7 +552,9 @@ */ void walk_central_bus(void) { - walk_native_bus(CENTRAL_BUS_ADDR, &root); + walk_native_bus(CENTRAL_BUS_ADDR, + CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET), + &root); } void fixup_child_irqs(struct parisc_device *parent, int base, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/entry.S linux.21pre4-ac2/arch/parisc/kernel/entry.S --- linux.21pre4/arch/parisc/kernel/entry.S 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/entry.S 2003-01-06 15:38:40.000000000 +0000 @@ -759,32 +759,30 @@ .import do_softirq,code intr_do_softirq: - bl do_softirq,%r2 #ifdef __LP64__ ldo -16(%r30),%r29 /* Reference param save area */ -#else - nop #endif - b intr_check_resched - nop + ldil L%intr_check_resched, %r2 + b do_softirq + ldo R%intr_check_resched(%r2), %r2 .import schedule,code intr_do_resched: /* Only do reschedule if we are returning to user space */ LDREG PT_IASQ0(%r16), %r20 - CMPIB= 0,%r20,intr_restore /* backward */ + CMPIB= 0,%r20,intr_check_sig /* backward */ nop LDREG PT_IASQ1(%r16), %r20 - CMPIB= 0,%r20,intr_restore /* backward */ + CMPIB= 0,%r20,intr_check_sig /* backward */ nop #ifdef __LP64__ ldo -16(%r30),%r29 /* Reference param save area */ #endif - ldil L%intr_return, %r2 + ldil L%intr_check_sig, %r2 b schedule - ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ + ldo R%intr_check_sig(%r2), %r2 .import do_signal,code @@ -799,15 +797,14 @@ copy %r0, %r24 /* unsigned long in_syscall */ copy %r16, %r25 /* struct pt_regs *regs */ - ssm PSW_SM_I, %r0 + copy %r0, %r26 /* sigset_t *oldset = NULL */ #ifdef __LP64__ ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl do_signal,%r2 - copy %r0, %r26 /* sigset_t *oldset = NULL */ + ldil L%intr_restore, %r2 + b do_signal + ldo R%intr_restore(%r2), %r2 - b intr_restore - nop /* * External interrupts. 
@@ -852,7 +849,7 @@ ldo -16(%r30),%r29 /* Reference param save area */ #endif b do_cpu_irq_mask - ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ + ldo R%intr_return(%r2), %r2 @@ -925,19 +922,16 @@ loadgp copy %r29, %r25 /* arg1 is pt_regs */ -#ifdef CONFIG_KWDB - copy %r29, %r3 /* KWDB - update frame pointer (gr3) */ -#endif #ifdef __LP64__ ldo -16(%r30),%r29 /* Reference param save area */ #endif - ldil L%intr_return, %r2 + ldil L%intr_restore, %r2 copy %r25, %r16 /* save pt_regs */ b handle_interruption - ldo R%intr_return(%r2), %r2 /* return to intr_return */ + ldo R%intr_restore(%r2), %r2 /* @@ -1319,9 +1313,9 @@ /* Get rid of prot bits and convert to page addr for idtlbt */ - extrd,s pte,35,4,t0 + extrd,s pte,35,4,t1 depdi 0,63,12,pte /* clear lower 12 bits */ - addi,= 1,t0,0 + addi,= 1,t1,0 extrd,u,*tr pte,56,25,pte extrd,s pte,56,25,pte /* bit 31:8 >> 8 */ idtlbt pte,prot @@ -1389,9 +1383,9 @@ /* Get rid of prot bits and convert to page addr for idtlbt */ - extrd,s pte,35,4,t0 + extrd,s pte,35,4,t1 depdi 0,63,12,pte /* clear lower 12 bits */ - addi,= 1,t0,0 + addi,= 1,t1,0 extrd,u,*tr pte,56,25,pte extrd,s pte,56,25,pte /* bit 31:8 >> 8 */ idtlbt pte,prot @@ -1642,9 +1636,9 @@ /* Get rid of prot bits and convert to page addr for iitlbt */ - extrd,s pte,35,4,t0 + extrd,s pte,35,4,t1 depdi 0,63,12,pte /* clear lower 12 bits */ - addi,= 1,t0,0 + addi,= 1,t1,0 extrd,u,*tr pte,56,25,pte extrd,s pte,56,25,pte /* bit 31:8 >> 8 */ iitlbt pte,prot @@ -1860,9 +1854,9 @@ extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0 depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */ - extrd,s pte,35,4,t0 + extrd,s pte,35,4,t1 depdi 0,63,12,pte /* clear lower 12 bits */ - addi,= 1,t0,0 + addi,= 1,t1,0 extrd,u,*tr pte,56,25,pte extrd,s pte,56,25,pte /* bit 31:8 >> 8 */ idtlbt pte,prot @@ -2358,9 +2352,9 @@ STREG %r2,TASK_PT_IAOQ0(%r1) ldo 4(%r2),%r2 STREG %r2,TASK_PT_IAOQ1(%r1) - copy %r25,%r16 + b intr_restore - nop + copy %r25,%r16 .import do_softirq,code syscall_do_softirq: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/firmware.c linux.21pre4-ac2/arch/parisc/kernel/firmware.c --- linux.21pre4/arch/parisc/kernel/firmware.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/firmware.c 2003-01-06 15:38:40.000000000 +0000 @@ -1,4 +1,4 @@ -/* arch/parisc/kernel/pdc.c - safe pdc access routines +/* arch/parisc/kernel/firmware.c - safe pdc access routines * * Copyright 1999 SuSE GmbH Nuernberg (Philipp Rumpf, prumpf@tux.org) * portions Copyright 1999 The Puffin Group, (Alex deVries, David Kennedy) @@ -177,6 +177,45 @@ } /** + * pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message. + * @retval: -1 on error, 0 on success. Other value are PDC errors + * + * Must be correctly formatted or expect system crash + */ +#ifdef __LP64__ +int pdc_pat_chassis_send_log(unsigned long state, unsigned long data) +{ + if (!is_pdc_pat()) + return -1; + + int retval = 0; + + spin_lock_irq(&pdc_lock); + retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data)); + spin_unlock_irq(&pdc_lock); + + return retval; +} +#endif + +/** + * pdc_chassis_disp - Updates display + * @retval: -1 on error, 0 on success + * + * Works on old PDC only (E class, others?) 
+ */ +int pdc_chassis_disp(unsigned long disp) +{ + int retval = 0; + + spin_lock_irq(&pdc_lock); + retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_DISP, disp); + spin_unlock_irq(&pdc_lock); + + return retval; +} + +/** * pdc_coproc_cfg - To identify coprocessors attached to the processor. * @pdc_coproc_info: Return buffer address. * @@ -507,12 +546,11 @@ /* BCJ-XXXX series boxes. E.G. "9000/785/C3000" */ #define IS_SPROCKETS() (strlen(boot_cpu_data.pdc.sys_model_name) == 14 && \ - strncmp(boot_cpu_data.pdc.sys_model_name, "9000/785", 9) == 0) + strncmp(boot_cpu_data.pdc.sys_model_name, "9000/785", 8) == 0) retval = mem_pdc_call(PDC_INITIATOR, PDC_GET_INITIATOR, __pa(pdc_result), __pa(hwpath)); - if (retval >= PDC_OK) { *scsi_id = (unsigned char) pdc_result[0]; @@ -534,10 +572,12 @@ ** pdc_result[3] PDC suggested SCSI rate */ + /* + ** XXX REVISIT: Doesn't look like PAT PDC does the same. + ** Problem is A500 also exports 50-pin SE SCSI port. + */ if (IS_SPROCKETS()) { /* - ** Revisit: PAT PDC do the same thing? - ** A500 also exports 50-pin SE SCSI. ** 0 == 8-bit ** 1 == 16-bit */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/head64.S linux.21pre4-ac2/arch/parisc/kernel/head64.S --- linux.21pre4/arch/parisc/kernel/head64.S 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/head64.S 2003-01-06 15:38:40.000000000 +0000 @@ -24,7 +24,7 @@ #include /* for PDC_PSW defines */ - .level 2.0 + .level 2.0w .section .initcall.init .align 4 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/ioctl32.c linux.21pre4-ac2/arch/parisc/kernel/ioctl32.c --- linux.21pre4/arch/parisc/kernel/ioctl32.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/ioctl32.c 2003-01-06 15:38:40.000000000 +0000 @@ -2986,6 +2986,9 @@ COMPATIBLE_IOCTL(BLKFRASET) COMPATIBLE_IOCTL(BLKSECTSET) COMPATIBLE_IOCTL(BLKSSZGET) +COMPATIBLE_IOCTL(BLKBSZGET) +COMPATIBLE_IOCTL(BLKBSZSET) +COMPATIBLE_IOCTL(BLKGETSIZE64) /* RAID */ COMPATIBLE_IOCTL(RAID_VERSION) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/irq.c linux.21pre4-ac2/arch/parisc/kernel/irq.c --- linux.21pre4/arch/parisc/kernel/irq.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/irq.c 2003-01-06 15:38:40.000000000 +0000 @@ -201,7 +201,7 @@ { #ifdef CONFIG_PROC_FS char *p = buf; - unsigned int regnr; + unsigned int regnr = 0; p += sprintf(p, " "); #ifdef CONFIG_SMP diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/lasimap.map linux.21pre4-ac2/arch/parisc/kernel/lasimap.map --- linux.21pre4/arch/parisc/kernel/lasimap.map 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/lasimap.map 1970-01-01 01:00:00.000000000 +0100 @@ -1,322 +0,0 @@ -# HP 712 kernel keymap. This uses 7 modifier combinations. - -keymaps 0-2,4-5,8,12 -# ie, plain, Shift, AltGr, Control, Control+Shift, Alt and Control+Alt - - -# Change the above line into -# keymaps 0-2,4-6,8,12 -# in case you want the entries -# altgr control keycode 83 = Boot -# altgr control keycode 111 = Boot -# below. 
-# -# In fact AltGr is used very little, and one more keymap can -# be saved by mapping AltGr to Alt (and adapting a few entries): -# keycode 100 = Alt -# -keycode 1 = F9 F19 Console_21 - control keycode 1 = F9 - alt keycode 1 = Console_9 - control alt keycode 1 = Console_9 -keycode 2 = -keycode 3 = F5 F15 Console_17 - control keycode 3 = F5 - alt keycode 3 = Console_5 - control alt keycode 3 = Console_5 -keycode 4 = F3 F13 Console_15 - control keycode 4 = F3 - alt keycode 4 = Console_3 - control alt keycode 4 = Console_3 -keycode 5 = F1 F11 Console_13 - control keycode 5 = F1 - alt keycode 5 = Console_1 - control alt keycode 5 = Console_1 -keycode 6 = F2 F12 Console_14 - control keycode 6 = F2 - alt keycode 6 = Console_2 - control alt keycode 6 = Console_2 -keycode 7 = F12 F12 Console_24 - control keycode 7 = F12 - alt keycode 7 = Console_12 - control alt keycode 7 = Console_12 -keycode 8 = -keycode 9 = F10 F20 Console_22 - control keycode 9 = F10 - alt keycode 9 = Console_10 - control alt keycode 9 = Console_10 -keycode 10 = F8 F18 Console_20 - control keycode 10 = F8 - alt keycode 10 = Console_8 - control alt keycode 10 = Console_8 -keycode 11 = F6 F16 Console_18 - control keycode 11 = F6 - alt keycode 11 = Console_6 - control alt keycode 11 = Console_6 -keycode 12 = F4 F14 Console_16 - control keycode 12 = F4 - alt keycode 12 = Console_4 - control alt keycode 12 = Console_4 -keycode 13 = Tab Tab - alt keycode 13 = Meta_Tab -keycode 14 = grave asciitilde - control keycode 14 = nul - alt keycode 14 = Meta_grave -keycode 15 = -keycode 16 = -keycode 17 = Alt -keycode 18 = Shift -keycode 19 = -keycode 20 = Control -keycode 21 = q -keycode 22 = one exclam exclam -keycode 23 = -keycode 24 = -keycode 25 = -keycode 26 = z -keycode 27 = s -keycode 28 = a - altgr keycode 28 = Hex_A -keycode 29 = w -keycode 30 = two at at -keycode 31 = -keycode 32 = -keycode 33 = c - altgr keycode 46 = Hex_C -keycode 34 = x -keycode 35 = d - altgr keycode 35 = Hex_D -keycode 36 = e - altgr keycode 36 = Hex_E -keycode 37 = four dollar -keycode 38 = three numbersign -keycode 39 = -keycode 40 = -keycode 41 = -keycode 42 = v -keycode 43 = f - altgr keycode 43 = Hex_F -keycode 44 = t -keycode 45 = r -keycode 46 = five percent -keycode 47 = -keycode 48 = -keycode 49 = n -keycode 50 = b - altgr keycode 50 = Hex_B -keycode 51 = h -keycode 52 = g -keycode 53 = y -keycode 54 = six asciicircum -keycode 55 = -keycode 56 = -keycode 57 = -keycode 58 = m -keycode 59 = j -keycode 60 = u -keycode 61 = seven ampersand -keycode 62 = eight asterisk asterisk -keycode 63 = -keycode 64 = -keycode 65 = comma less - alt keycode 65 = Meta_comma -keycode 66 = k -keycode 67 = i -keycode 68 = o -keycode 69 = zero parenright bracketright -keycode 70 = nine parenleft bracketleft -keycode 71 = -keycode 72 = -keycode 73 = period greater - control keycode 73 = Compose - alt keycode 73 = Meta_period -keycode 74 = slash question - control keycode 74 = Delete - alt keycode 53 = Meta_slash -keycode 75 = l -keycode 76 = semicolon colon - alt keycode 39 = Meta_semicolon -keycode 77 = p -keycode 78 = minus underscore -keycode 79 = -keycode 80 = -keycode 81 = -keycode 82 = apostrophe quotedbl - control keycode 82 = Control_g - alt keycode 40 = Meta_apostrophe -keycode 83 = -keycode 84 = bracketleft braceleft - control keycode 84 = Escape - alt keycode 26 = Meta_bracketleft -keycode 85 = equal plus -keycode 86 = -keycode 87 = -keycode 88 = Caps_Lock -keycode 88 = -keycode 89 = -keycode 89 = -keycode 89 = -keycode 90 = Return - alt keycode 90 = 
Meta_Control_m -keycode 91 = bracketright braceright asciitilde - control keycode 91 = Control_bracketright - alt keycode 91 = Meta_bracketright -keycode 92 = -keycode 93 = backslash bar - control keycode 43 = Control_backslash - alt keycode 43 = Meta_backslash -keycode 94 = -keycode 95 = -keycode 96 = -keycode 97 = -keycode 98 = -keycode 99 = -keycode 100 = -keycode 101 = -keycode 102 = BackSpace -keycode 103 = -keycode 104 = -keycode 105 = KP_1 - alt keycode 105 = Ascii_1 - altgr keycode 105 = Hex_1 -keycode 106 = -keycode 107 = KP_4 - alt keycode 107 = Ascii_4 - altgr keycode 107 = Hex_4 -keycode 108 = KP_7 - alt keycode 108 = Ascii_7 - altgr keycode 108 = Hex_7 -keycode 109 = -keycode 110 = -keycode 111 = -keycode 112 = KP_0 - alt keycode 82 = Ascii_0 - altgr keycode 82 = Hex_0 -keycode 113 = KP_Period -keycode 114 = KP_2 - alt keycode 114 = Ascii_2 - altgr keycode 114 = Hex_2 -keycode 115 = KP_5 - alt keycode 115 = Ascii_5 - altgr keycode 115 = Hex_5 -keycode 116 = KP_6 - alt keycode 116 = Ascii_6 - altgr keycode 116 = Hex_6 -keycode 117 = KP_8 - alt keycode 117 = Ascii_8 - altgr keycode 117 = Hex_8 -keycode 118 = Escape -keycode 119 = -keycode 120 = F11 -keycode 121 = KP_Add -keycode 122 = KP_3 - alt keycode 122 = Ascii_3 - altgr keycode 122 = Hex_3 -keycode 123 = KP_Subtract -keycode 124 = KP_Multiply -keycode 125 = KP_9 - alt keycode 125 = Ascii_9 - altgr keycode 125 = Hex_9 -keycode 126 = -# 131!! -keycode 127 = F7 F17 Console_19 - control keycode 127 = F7 - alt keycode 127 = Console_7 - control alt keycode 127 = Console_7 - -string F1 = "\033[[A" -string F2 = "\033[[B" -string F3 = "\033[[C" -string F4 = "\033[[D" -string F5 = "\033[[E" -string F6 = "\033[17~" -string F7 = "\033[18~" -string F8 = "\033[19~" -string F9 = "\033[20~" -string F10 = "\033[21~" -string F11 = "\033[23~" -string F12 = "\033[24~" -string F13 = "\033[25~" -string F14 = "\033[26~" -string F15 = "\033[28~" -string F16 = "\033[29~" -string F17 = "\033[31~" -string F18 = "\033[32~" -string F19 = "\033[33~" -string F20 = "\033[34~" -string Find = "\033[1~" -string Insert = "\033[2~" -string Remove = "\033[3~" -string Select = "\033[4~" -string Prior = "\033[5~" -string Next = "\033[6~" -string Macro = "\033[M" -string Pause = "\033[P" -compose '`' 'A' to 'À' -compose '`' 'a' to 'à' -compose '\'' 'A' to 'Á' -compose '\'' 'a' to 'á' -compose '^' 'A' to 'Â' -compose '^' 'a' to 'â' -compose '~' 'A' to 'Ã' -compose '~' 'a' to 'ã' -compose '"' 'A' to 'Ä' -compose '"' 'a' to 'ä' -compose 'O' 'A' to 'Å' -compose 'o' 'a' to 'å' -compose '0' 'A' to 'Å' -compose '0' 'a' to 'å' -compose 'A' 'A' to 'Å' -compose 'a' 'a' to 'å' -compose 'A' 'E' to 'Æ' -compose 'a' 'e' to 'æ' -compose ',' 'C' to 'Ç' -compose ',' 'c' to 'ç' -compose '`' 'E' to 'È' -compose '`' 'e' to 'è' -compose '\'' 'E' to 'É' -compose '\'' 'e' to 'é' -compose '^' 'E' to 'Ê' -compose '^' 'e' to 'ê' -compose '"' 'E' to 'Ë' -compose '"' 'e' to 'ë' -compose '`' 'I' to 'Ì' -compose '`' 'i' to 'ì' -compose '\'' 'I' to 'Í' -compose '\'' 'i' to 'í' -compose '^' 'I' to 'Î' -compose '^' 'i' to 'î' -compose '"' 'I' to 'Ï' -compose '"' 'i' to 'ï' -compose '-' 'D' to 'Ð' -compose '-' 'd' to 'ð' -compose '~' 'N' to 'Ñ' -compose '~' 'n' to 'ñ' -compose '`' 'O' to 'Ò' -compose '`' 'o' to 'ò' -compose '\'' 'O' to 'Ó' -compose '\'' 'o' to 'ó' -compose '^' 'O' to 'Ô' -compose '^' 'o' to 'ô' -compose '~' 'O' to 'Õ' -compose '~' 'o' to 'õ' -compose '"' 'O' to 'Ö' -compose '"' 'o' to 'ö' -compose '/' 'O' to 'Ø' -compose '/' 'o' to 'ø' -compose '`' 'U' to 'Ù' -compose '`' 'u' to 
'ù' -compose '\'' 'U' to 'Ú' -compose '\'' 'u' to 'ú' -compose '^' 'U' to 'Û' -compose '^' 'u' to 'û' -compose '"' 'U' to 'Ü' -compose '"' 'u' to 'ü' -compose '\'' 'Y' to 'Ý' -compose '\'' 'y' to 'ý' -compose 'T' 'H' to 'Þ' -compose 't' 'h' to 'þ' -compose 's' 's' to 'ß' -compose '"' 'y' to 'ÿ' -compose 's' 'z' to 'ß' -compose 'i' 'j' to 'ÿ' diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/lba_pci.c linux.21pre4-ac2/arch/parisc/kernel/lba_pci.c --- linux.21pre4/arch/parisc/kernel/lba_pci.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/lba_pci.c 2003-01-06 15:38:40.000000000 +0000 @@ -735,47 +735,11 @@ bus->resource[0] = &(ldev->hba.io_space); bus->resource[1] = &(ldev->hba.lmmio_space); } else { - /* KLUGE ALERT! - ** PCI-PCI Bridge resource munging. - ** This hack should go away in the near future. - ** It's based on the Alpha port. - */ - int i; - u16 cmd; - - for (i = 0; i < 4; i++) { - bus->resource[i] = - &bus->self->resource[PCI_BRIDGE_RESOURCES+i]; - bus->resource[i]->name = bus->name; - } -#if 0 - bus->resource[0]->flags |= pci_bridge_check_io(bus->self); -#else - bus->resource[0]->flags |= IORESOURCE_IO; -#endif - bus->resource[1]->flags |= IORESOURCE_MEM; - bus->resource[2]->flags = 0; /* Don't support prefetchable */ - bus->resource[3]->flags = 0; /* not used */ - - /* - ** If the PPB is enabled (ie already configured) then - ** just read those values. - */ - (void) lba_cfg_read16(bus->self, PCI_COMMAND, &cmd); - if (cmd & (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)) { - pci_read_bridge_bases(bus); - } else { - /* Not configured. - ** For now, propogate HBA limits to the bus; - ** PCI will adjust them later. - */ - bus->resource[0]->end = ldev->hba.io_space.end; - bus->resource[1]->end = ldev->hba.lmmio_space.end; - } + pci_read_bridge_bases(bus); - /* Turn off downstream PF memory address range by default */ - bus->resource[2]->start = 1024*1024; - bus->resource[2]->end = bus->resource[2]->start - 1; + /* Turn off downstream PreFetchable Memory range by default */ + bus->resource[2]->start = 0; + bus->resource[2]->end = 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/led.c linux.21pre4-ac2/arch/parisc/kernel/led.c --- linux.21pre4/arch/parisc/kernel/led.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/led.c 2003-01-06 15:38:40.000000000 +0000 @@ -352,7 +352,8 @@ static unsigned long led_net_rx_counter, led_net_tx_counter; static void led_get_net_stats(int addvalue) -{ +{ +#ifdef CONFIG_NET static unsigned long rx_total_last, tx_total_last; unsigned long rx_total, tx_total; struct net_device *dev; @@ -383,6 +384,7 @@ rx_total_last += rx_total; tx_total_last += tx_total; +#endif } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/Makefile linux.21pre4-ac2/arch/parisc/kernel/Makefile --- linux.21pre4/arch/parisc/kernel/Makefile 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/Makefile 2003-01-06 15:38:40.000000000 +0000 @@ -24,7 +24,7 @@ pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ ptrace.o hardware.o inventory.o drivers.o semaphore.o \ signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ - processor.o power.o + processor.o power.o pdc_chassis.o export-objs := parisc_ksyms.o superio.o keyboard.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/parisc_ksyms.c 
linux.21pre4-ac2/arch/parisc/kernel/parisc_ksyms.c --- linux.21pre4/arch/parisc/kernel/parisc_ksyms.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/parisc_ksyms.c 2003-01-06 15:38:40.000000000 +0000 @@ -232,3 +232,11 @@ EXPORT_SYMBOL_NOVERS($$dyncall); #endif +#ifdef CONFIG_SMP +#ifdef CONFIG_DEBUG_SPINLOCK +#include +EXPORT_SYMBOL(spin_lock); +EXPORT_SYMBOL(spin_unlock); +EXPORT_SYMBOL(spin_trylock); +#endif +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/pci.c linux.21pre4-ac2/arch/parisc/kernel/pci.c --- linux.21pre4/arch/parisc/kernel/pci.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/pci.c 2003-01-06 15:38:40.000000000 +0000 @@ -306,13 +306,13 @@ (32) for primary and secondary buses. */ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32); - /* Read bridge control */ + /* Read bridge control - force SERR/PERR on */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bus->bridge_ctl); + bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bus->bridge_ctl); } - /* Set FBB bit for now. Disable ISA IO forwarding. Enable PERR/SERR */ - bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK | - PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; + bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; } @@ -390,7 +390,7 @@ */ void __devinit pcibios_align_resource(void *data, struct resource *res, - unsigned long size, unsigned long alignment) + unsigned long size, unsigned long alignment) { unsigned long mask, align; @@ -407,7 +407,7 @@ align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; /* Align to largest of MIN or input size */ - mask = MAX(size, align) - 1; + mask = MAX(alignment, align) - 1; res->start += mask; res->start &= ~mask; @@ -420,7 +420,7 @@ int __devinit -pcibios_enable_device(struct pci_dev *dev) +pcibios_enable_device(struct pci_dev *dev, int mask) { u16 cmd; int idx; @@ -438,6 +438,11 @@ */ for (idx=0; idxresource[idx]; + + /* only setup requested resources */ + if (!(mask & (1<flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) @@ -454,9 +459,11 @@ */ cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY); +#if 0 /* If bridge/bus controller has FBB enabled, child must too. */ if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK) cmd |= PCI_COMMAND_FAST_BACK; +#endif DBGC("PCIBIOS: Enabling device %s cmd 0x%04x\n", dev->slot_name, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); @@ -478,6 +485,19 @@ #endif } +static void __devinit +pcibios_enable_ppb(struct pci_bus *bus) +{ + struct list_head *list; + + /* find a leaf of the PCI bus tree. 
*/ + list_for_each(list, &bus->children) + pcibios_enable_ppb(pci_bus_b(list)); + + if (bus->self && (bus->self->class >> 8) == PCI_CLASS_BRIDGE_PCI) + pdev_enable_device(bus->self); +} + /* ** Mostly copied from drivers/pci/setup-bus.c:pci_assign_unassigned_resources() @@ -486,16 +506,13 @@ pcibios_assign_unassigned_resources(struct pci_bus *bus) { /* from drivers/pci/setup-bus.c */ - extern void pbus_assign_resources(struct pci_bus *bus, struct pbus_set_ranges_data *ranges); + extern void pbus_size_bridges(struct pci_bus *bus); + extern void pbus_assign_resources(struct pci_bus *bus); - struct pbus_set_ranges_data ranges; + pbus_size_bridges(bus); + pbus_assign_resources(bus); - ranges.io_end = ranges.io_start - = bus->resource[0]->start + PCIBIOS_MIN_IO; - ranges.mem_end = ranges.mem_start - = bus->resource[1]->start + PCIBIOS_MIN_MEM; - ranges.found_vga = 0; - pbus_assign_resources(bus, &ranges); + pcibios_enable_ppb(bus); } /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/pdc_chassis.c linux.21pre4-ac2/arch/parisc/kernel/pdc_chassis.c --- linux.21pre4/arch/parisc/kernel/pdc_chassis.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/parisc/kernel/pdc_chassis.c 2003-01-06 15:38:40.000000000 +0000 @@ -0,0 +1,205 @@ +/* + * arch/parisc/kernel/pdc_chassis.c + * + * Copyright (C) 2002 Laurent Canet + * Copyright (C) 2002 Thibaut Varene + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#undef PDC_CHASSIS_DEBUG +#ifdef PDC_CHASSIS_DEBUG +#define DPRINTK(fmt, args...) printk(fmt, ## args) +#else +#define DPRINTK(fmt, args...) +#endif + +#include +#include +#include +#include + +#include +#include + +static int pdc_chassis_old = 0; + + +/** + * pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility + * @pdc_chassis_old: 1 if old pdc chassis style + * + * Currently, only E class and A180 are known to work with this. + * Inspired by Christoph Plattner + */ + +static void __init pdc_chassis_checkold(void) +{ + switch(CPU_HVERSION) { + case 0x480: /* E25 */ + case 0x481: /* E35 */ + case 0x482: /* E45 */ + case 0x483: /* E55 */ + case 0x516: /* A180 */ + pdc_chassis_old = 1; + break; + + default: + break; + } + DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old); +} + + +/** + * pdc_chassis_panic_event() - Called by the panic handler. + * + * As soon as a panic occurs, we should inform the PDC. + */ + +static int pdc_chassis_panic_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); + return NOTIFY_DONE; +} + + +static struct notifier_block pdc_chassis_panic_block = { + notifier_call: pdc_chassis_panic_event, + priority: INT_MAX, +}; + + +/** + * parisc_reboot_event() - Called by the reboot handler. + * + * As soon as a reboot occurs, we should inform the PDC. 
+ */ + +static int pdc_chassis_reboot_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); + return NOTIFY_DONE; +} + + +static struct notifier_block pdc_chassis_reboot_block = { + notifier_call: pdc_chassis_reboot_event, + priority: INT_MAX, +}; + + +/** + * parisc_pdc_chassis_init() - Called at boot time. + */ + +void __init parisc_pdc_chassis_init(void) +{ + DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); + + /* initialize panic notifier chain */ + notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block); + + /* initialize reboot notifier chain */ + register_reboot_notifier(&pdc_chassis_reboot_block); + + /* Check for old LED Panel */ + pdc_chassis_checkold(); +} + + +/** + * pdc_chassis_send_status() - Sends a predefined message to the chassis, + * and changes the front panel LEDs according to the new system state + * @retval: PDC call return value. + * + * Only machines with 64 bits PDC PAT and E-class are supported atm. + * + * returns 0 if no error, -1 if no supported PDC is present or invalid message, + * else returns the appropriate PDC error code. + * + * For a list of predefined messages, see asm-parisc/pdc_chassis.h + */ + +int pdc_chassis_send_status(int message) +{ + /* Maybe we should do that in an other way ? */ + int retval = 0; + + DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); + +#ifdef __LP64__ /* pdc_pat_chassis_send_log is defined only when #ifdef __LP64__ */ + if (is_pdc_pat()) { + switch(message) { + case PDC_CHASSIS_DIRECT_BSTART: + retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL); + break; + + case PDC_CHASSIS_DIRECT_BCOMPLETE: + retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL); + break; + + case PDC_CHASSIS_DIRECT_SHUTDOWN: + retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS); + break; + + case PDC_CHASSIS_DIRECT_PANIC: + retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC); + break; + + case PDC_CHASSIS_DIRECT_LPMC: + retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT); + break; + + case PDC_CHASSIS_DIRECT_HPMC: + retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT); + break; + + default: + retval = -1; + } + } else retval = -1; +#else + if (pdc_chassis_old) { + switch (message) { + case PDC_CHASSIS_DIRECT_BSTART: + case PDC_CHASSIS_DIRECT_BCOMPLETE: + retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN)); + break; + + case PDC_CHASSIS_DIRECT_SHUTDOWN: + retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT)); + break; + + case PDC_CHASSIS_DIRECT_HPMC: + case PDC_CHASSIS_DIRECT_PANIC: + retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT)); + break; + + case PDC_CHASSIS_DIRECT_LPMC: + retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN)); + break; + + default: + retval = -1; + } + } else retval = -1; +#endif + + return retval; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/pdc_cons.c linux.21pre4-ac2/arch/parisc/kernel/pdc_cons.c --- linux.21pre4/arch/parisc/kernel/pdc_cons.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/pdc_cons.c 2003-01-06 15:38:40.000000000 +0000 @@ -51,33 +51,33 @@ return 0; } -#ifdef CONFIG_PDC_CONSOLE +#if defined(CONFIG_PDC_CONSOLE) || defined(CONFIG_SERIAL_MUX) +#define 
PDC_CONSOLE_DEVICE pdc_console_device static kdev_t pdc_console_device (struct console *c) { - return MKDEV(PDCCONS_MAJOR, 0); + return MKDEV(MUX_MAJOR, 0); } -#endif - -#ifdef CONFIG_PDC_CONSOLE -#define PDC_CONSOLE_DEVICE pdc_console_device -#else +#else #define PDC_CONSOLE_DEVICE NULL #endif static struct console pdc_cons = { name: "ttyB", write: pdc_console_write, - device: PDC_CONSOLE_DEVICE, +#warning UPSTREAM 2.4.19 removed the next 4 lines but we did not + read: NULL, + device: PDC_CONSOLE_DEVICE, + unblank: NULL, setup: pdc_console_setup, flags: CON_BOOT|CON_PRINTBUFFER|CON_ENABLED, index: -1, }; static int pdc_console_initialized; - extern unsigned long con_start; /* kernel/printk.c */ extern unsigned long log_end; /* kernel/printk.c */ - + + static void pdc_console_init_force(void) { if (pdc_console_initialized) @@ -94,7 +94,7 @@ void pdc_console_init(void) { -#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE) +#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE) || defined(CONFIG_SERIAL_MUX) pdc_console_init_force(); #endif #ifdef EARLY_BOOTUP_DEBUG diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/power.c linux.21pre4-ac2/arch/parisc/kernel/power.c --- linux.21pre4/arch/parisc/kernel/power.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/power.c 2003-01-06 15:38:40.000000000 +0000 @@ -44,15 +44,12 @@ #include #include #include -#include -#include #include #include #include #include #include -#include #include @@ -144,11 +141,7 @@ DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0); /* soft power switch enabled/disabled */ -#ifdef CONFIG_PROC_FS -static int pwrsw_enabled = 1; -#else -#define pwrsw_enabled (1) -#endif +int pwrsw_enabled = 1; /* * On gecko style machines (e.g. 712/xx and 715/xx) @@ -209,90 +202,6 @@ -/* - * /proc filesystem support - */ - -#ifdef CONFIG_SYSCTL -static int power_proc_read(char *page, char **start, off_t off, int count, - int *eof, void *data) -{ - char *out = page; - int len; - - out += sprintf(out, "Software power switch support: "); - out += sprintf(out, pwrsw_enabled ? 
"enabled (1)" : "disabled (0)" ); - out += sprintf(out, "\n"); - - len = out - page - off; - if (len < count) { - *eof = 1; - if (len <= 0) return 0; - } else { - len = count; - } - *start = page + off; - return len; -} - -static int power_proc_write(struct file *file, const char *buf, - unsigned long count, void *data) -{ - char *cur, lbuf[count]; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - memset(lbuf, 0, count); - - copy_from_user(lbuf, buf, count); - cur = lbuf; - - /* skip initial spaces */ - while (*cur && isspace(*cur)) - cur++; - - switch (*cur) { - case '0': pwrsw_enabled = 0; - break; - case '1': pwrsw_enabled = 1; - break; - default: printk(KERN_CRIT "/proc/" SYSCTL_FILENAME - ": Parse error: only '0' or '1' allowed!\n"); - return -EINVAL; - } /* switch() */ - - return count; -} - -static struct proc_dir_entry *ent; - -static void __init power_create_procfs(void) -{ - if (!power_tasklet.func) - return; - - ent = create_proc_entry(SYSCTL_FILENAME, S_IFREG|S_IRUGO|S_IWUSR, 0); - if (!ent) return; - - ent->nlink = 1; - ent->read_proc = power_proc_read; - ent->write_proc = power_proc_write; - ent->owner = THIS_MODULE; -} - -static void __exit power_remove_procfs(void) -{ - remove_proc_entry(SYSCTL_FILENAME, NULL); -} - -#else -#define power_create_procfs() do { } while (0) -#define power_remove_procfs() do { } while (0) -#endif /* CONFIG_SYSCTL */ - - - /* parisc_panic_event() is called by the panic handler. * As soon as a panic occurs, our tasklets above will not be * executed any longer. This function then re-enables the @@ -346,7 +255,6 @@ /* Register a call for panic conditions. */ notifier_chain_register(&panic_notifier_list, &parisc_panic_block); - power_create_procfs(); tasklet_enable(&power_tasklet); return 0; @@ -359,7 +267,6 @@ tasklet_disable(&power_tasklet); notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block); - power_remove_procfs(); power_tasklet.func = NULL; pdc_soft_power_button(0); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/process.c linux.21pre4-ac2/arch/parisc/kernel/process.c --- linux.21pre4/arch/parisc/kernel/process.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/process.c 2003-01-06 15:38:40.000000000 +0000 @@ -36,6 +36,7 @@ #include #include #include +#include int hlt_counter; @@ -118,6 +119,9 @@ /* "Normal" system reset */ pdc_do_reset(); + /* set up a new led state on systems shipped with a LED State panel */ + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); + /* Nope...box should reset with just CMD_RESET now */ gsc_writel(CMD_RESET, COMMAND_GLOBAL); @@ -150,6 +154,8 @@ * following call will immediately power off. */ pdc_soft_power_button(0); + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN); + /* It seems we have no way to power the system off via * software. The user has to press the button himself. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/processor.c linux.21pre4-ac2/arch/parisc/kernel/processor.c --- linux.21pre4/arch/parisc/kernel/processor.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/processor.c 2003-01-06 15:38:40.000000000 +0000 @@ -1,4 +1,4 @@ -/* $Id: processor.c,v 1.13 2002/07/04 19:33:01 grundler Exp $ +/* $Id: processor.c,v 1.14 2002/09/13 06:46:28 grundler Exp $ * * Initial setup-routines for HP 9000 based hardware. 
* @@ -77,6 +77,8 @@ unsigned long txn_addr; unsigned long cpuid; struct cpuinfo_parisc *p; + extern struct irq_region_ops cpu_irq_ops; /* arch/parisc...irq.c */ + extern struct irqaction cpu_irq_actions[]; /* arch/parisc...irq.c */ #ifndef CONFIG_SMP if (boot_cpu_data.cpu_count > 0) { @@ -167,12 +169,24 @@ ** p->state = STATE_RENDEZVOUS; */ - /* - ** itimer and ipi IRQ handlers are statically initialized in - ** arch/parisc/kernel/irq.c. ie Don't need to register them. - */ - p->region = irq_region[IRQ_FROM_REGION(CPU_IRQ_REGION)]; +#if 0 + /* CPU 0 IRQ table is statically allocated/initialized */ + if (cpuid) { + struct irqaction actions[]; + + /* + ** itimer and ipi IRQ handlers are statically initialized in + ** arch/parisc/kernel/irq.c. ie Don't need to register them. + */ + actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC); + if (!actions) { + /* not getting it's own table, share with monarch */ + actions = cpu_irq_actions[0]; + } + cpu_irq_actions[cpuid] = actions; + } +#endif return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/sba_iommu.c linux.21pre4-ac2/arch/parisc/kernel/sba_iommu.c --- linux.21pre4/arch/parisc/kernel/sba_iommu.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/sba_iommu.c 2003-01-06 15:38:40.000000000 +0000 @@ -812,10 +812,8 @@ return(0); } - dev->dma_mask = mask; /* save it */ - /* only support 32-bit PCI devices - no DAC support (yet) */ - return((int) (mask == 0xffffffff)); + return((int) (mask == 0xffffffffUL)); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/setup.c linux.21pre4-ac2/arch/parisc/kernel/setup.c --- linux.21pre4/arch/parisc/kernel/setup.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/setup.c 2003-01-06 15:38:40.000000000 +0000 @@ -41,6 +41,7 @@ #include #include #include +#include #include /* for pa7300lc_init() proto */ #define COMMAND_LINE_SIZE 1024 @@ -283,6 +284,11 @@ parisc_init_resources(); do_device_inventory(); /* probe for hardware */ + parisc_pdc_chassis_init(); + + /* set up a new led state on systems shipped LED State panel */ + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART); + processor_init(); printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", boot_cpu_data.cpu_count, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/syscall.S linux.21pre4-ac2/arch/parisc/kernel/syscall.S --- linux.21pre4/arch/parisc/kernel/syscall.S 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/syscall.S 2003-01-06 15:38:40.000000000 +0000 @@ -593,11 +593,18 @@ ENTRY_DIFF(ftruncate64) /* 200 */ ENTRY_SAME(getdents64) ENTRY_DIFF(fcntl64) - ENTRY_SAME(ni_syscall) /* attrctl */ - ENTRY_SAME(ni_syscall) /* acl_get */ - ENTRY_SAME(ni_syscall) /* acl_set */ - ENTRY_SAME(gettid) +#ifdef CONFIG_XFS_FS + ENTRY_SAME(attrctl) + ENTRY_SAME(acl_get) + ENTRY_SAME(acl_set) /* 205 */ +#else + ENTRY_SAME(ni_syscall) + ENTRY_SAME(ni_syscall) + ENTRY_SAME(ni_syscall) /* 205 */ +#endif + ENTRY_SAME(gettid) ENTRY_SAME(readahead) + ENTRY_SAME(tkill) .end diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/sys_parisc32.c linux.21pre4-ac2/arch/parisc/kernel/sys_parisc32.c --- linux.21pre4/arch/parisc/kernel/sys_parisc32.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/sys_parisc32.c 2003-01-06 15:38:40.000000000 +0000 @@ -2759,20 +2759,9 @@ asmlinkage long 
sys32_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg) { - switch (cmd) { - case F_GETLK64: - cmd = F_GETLK; - break; - case F_SETLK64: - cmd = F_SETLK; - break; - case F_SETLKW64: - cmd = F_SETLKW; - break; - default: - break; - } - return sys_fcntl(fd, cmd, arg); + if (cmd >= F_GETLK64 && cmd <= F_SETLKW64) + return sys_fcntl(fd, cmd + F_GETLK - F_GETLK64, arg); + return sys32_fcntl(fd, cmd, arg); } asmlinkage int sys32_pread(int fd, void *buf, size_t count, unsigned int high, unsigned int low) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/traps.c linux.21pre4-ac2/arch/parisc/kernel/traps.c --- linux.21pre4/arch/parisc/kernel/traps.c 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/traps.c 2003-01-06 15:38:40.000000000 +0000 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -34,17 +35,10 @@ #include #include #include +#include #include "../math-emu/math-emu.h" /* for handle_fpe() */ -#ifdef CONFIG_KWDB -#include /* for BI2_KGDB_GDB */ -#include /* for __() */ -#include /* for struct save_state */ -#include /* for pt_regs_to_ssp and ssp_to_pt_regs */ -#include /* for I_BRK_INST */ -#endif /* CONFIG_KWDB */ - #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ /* dumped to the console via printk) */ @@ -66,6 +60,99 @@ #define RFMT "%08lx" #endif +static int kstack_depth_to_print = 24; +extern struct module *module_list; +extern struct module kernel_module; + +static inline int kernel_text_address(unsigned long addr) +{ +#ifdef CONFIG_MODULES + int retval = 0; + struct module *mod; +#endif + extern char _stext, _etext; + + if (addr >= (unsigned long) &_stext && + addr <= (unsigned long) &_etext) + return 1; + +#ifdef CONFIG_MODULES + for (mod = module_list; mod != &kernel_module; mod = mod->next) { + /* mod_bound tests for addr being inside the vmalloc'ed + * module area. Of course it'd be better to test only + * for the .text subset... */ + if (mod_bound(addr, 0, mod)) { + retval = 1; + break; + } + } + return retval; +#endif +} + + +void show_trace(unsigned long * stack) +{ + unsigned long *startstack; + unsigned long addr; + int i; + + startstack = (unsigned long *)((unsigned long)stack & ~(THREAD_SIZE - 1)); + i = 1; + printk("Kernel addresses on the stack:\n"); + while (stack >= startstack) { + addr = *stack--; + if (kernel_text_address(addr)) { + printk(" [<" RFMT ">] ", addr); + if ((i & 0x03) == 0) + printk("\n"); + i++; + } + } + printk("\n"); +} + +void show_trace_task(struct task_struct *tsk) +{ + show_trace((unsigned long *)tsk->thread.regs.ksp); +} + +void show_stack(unsigned long * sp) +{ + unsigned long *stack; + int i; + + /* + * debugging aid: "show_stack(NULL);" prints the + * back trace for this cpu. 
+ */ + if (sp==NULL) + sp = (unsigned long*)&sp; + + stack = sp; + printk("\n" KERN_CRIT "Stack Dump:\n"); + printk(KERN_CRIT " " RFMT ": ", (unsigned long) stack); + for (i=0; i < kstack_depth_to_print; i++) { + if (((long) stack & (THREAD_SIZE-1)) == 0) + break; + if (i && ((i & 0x03) == 0)) + printk("\n" KERN_CRIT " " RFMT ": ", + (unsigned long) stack); + printk(RFMT " ", *stack--); + } + printk("\n" KERN_CRIT "\n"); + show_trace(sp); +} + +/* + * The architecture-independent backtrace generator + */ +void dump_stack(void) +{ + show_stack(0); +} + + void show_regs(struct pt_regs *regs) { int i; @@ -121,84 +208,6 @@ } -static void dump_stack(unsigned long from, unsigned long to, int istackflag) -{ - unsigned int *fromptr; - unsigned int *toptr; - - fromptr = (unsigned int *)from; - toptr = (unsigned int *)to; - - printk("\n"); - printk(KERN_CRIT "Dumping %sStack from 0x%p to 0x%p:\n", - istackflag ? "Interrupt " : "", - fromptr, toptr); - - while (fromptr < toptr) { - printk(KERN_CRIT "%04lx %08x %08x %08x %08x %08x %08x %08x %08x\n", - ((unsigned long)fromptr) & 0xffff, - fromptr[0], fromptr[1], fromptr[2], fromptr[3], - fromptr[4], fromptr[5], fromptr[6], fromptr[7]); - fromptr += 8; - } -} - - -void show_stack(struct pt_regs *regs) -{ -#if 1 - /* If regs->sr[7] == 0, we are on a kernel stack */ - if (regs->sr[7] == 0) { - - unsigned long sp = regs->gr[30]; - unsigned long cr30; - unsigned long cr31; - unsigned long stack_start; - struct pt_regs *int_regs; - - cr30 = mfctl(30); - cr31 = mfctl(31); - stack_start = sp & ~(ISTACK_SIZE - 1); - if (stack_start == cr31) { - /* - * We are on the interrupt stack, get the stack - * pointer from the first pt_regs structure on - * the interrupt stack, so we can dump the task - * stack first. - */ - - int_regs = (struct pt_regs *)cr31; - sp = int_regs->gr[30]; - stack_start = sp & ~(INIT_TASK_SIZE - 1); - if (stack_start != cr30) { - printk(KERN_CRIT "WARNING! Interrupt-Stack pointer and cr30 do not correspond!\n"); - printk(KERN_CRIT "Dumping virtual address stack instead\n"); - dump_stack((unsigned long)__va(stack_start), (unsigned long)__va(sp), 0); - } else { - dump_stack(stack_start, sp, 0); - }; - - printk("\n\n" KERN_DEBUG "Registers at Interrupt:\n"); - show_regs(int_regs); - - /* Now dump the interrupt stack */ - - sp = regs->gr[30]; - stack_start = sp & ~(ISTACK_SIZE - 1); - dump_stack(stack_start,sp,1); - } - else - { - /* Stack Dump! */ - printk(KERN_CRIT "WARNING! 
Stack pointer and cr30 do not correspond!\n"); - printk(KERN_CRIT "Dumping virtual address stack instead\n"); - dump_stack((unsigned long)__va(stack_start), (unsigned long)__va(sp), 0); - } - } -#endif -} - - void die_if_kernel(char *str, struct pt_regs *regs, long err) { if (user_mode(regs)) { @@ -260,9 +269,6 @@ void handle_break(unsigned iir, struct pt_regs *regs) { struct siginfo si; -#ifdef CONFIG_KWDB - struct save_state ssp; -#endif /* CONFIG_KWDB */ switch(iir) { case 0x00: @@ -285,26 +291,6 @@ handle_gdb_break(regs, TRAP_BRKPT); break; -#ifdef CONFIG_KWDB - case KGDB_BREAK_INSN: - mtctl(0, 15); - pt_regs_to_ssp(regs, &ssp); - kgdb_trap(I_BRK_INST, &ssp, 1); - ssp_to_pt_regs(&ssp, regs); - break; - - case KGDB_INIT_BREAK_INSN: - mtctl(0, 15); - pt_regs_to_ssp(regs, &ssp); - kgdb_trap(I_BRK_INST, &ssp, 1); - ssp_to_pt_regs(&ssp, regs); - - /* Advance pcoq to skip break */ - regs->iaoq[0] = regs->iaoq[1]; - regs->iaoq[1] += 4; - break; -#endif /* CONFIG_KWDB */ - default: #ifdef PRINT_USER_FAULTS printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n", @@ -335,20 +321,6 @@ void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap; -#ifdef CONFIG_KWDB -int debug_call (void) -{ - printk (KERN_DEBUG "Debug call.\n"); - return 0; -} - -int debug_call_leaf (void) -{ - return 0; -} -#endif /* CONFIG_KWDB */ - - void transfer_pim_to_trap_frame(struct pt_regs *regs) { register int i; @@ -425,7 +397,7 @@ /* - * This routine handles page faults. It determines the address, + * This routine handles various exception codes. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. */ @@ -444,10 +416,20 @@ if (!console_drivers) pdc_console_restart(); - if (code == 1) - transfer_pim_to_trap_frame(regs); - show_stack(regs); + /* Not all switch paths will gutter the processor... */ + switch(code){ + + case 1: + transfer_pim_to_trap_frame(regs); + break; + + default: + /* Fall through */ + break; + } + + show_stack((unsigned long *)regs->gr[30]); printk("\n"); printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n", @@ -461,32 +443,34 @@ * system will shut down immediately right here. */ pdc_soft_power_button(0); + /* Gutter the processor... */ for(;;) ; } + void handle_interruption(int code, struct pt_regs *regs) { unsigned long fault_address = 0; unsigned long fault_space = 0; struct siginfo si; -#ifdef CONFIG_KWDB - struct save_state ssp; -#endif /* CONFIG_KWDB */ - - if (code == 1) - pdc_console_restart(); /* switch back to pdc if HPMC */ - else - sti(); -#if 0 - printk(KERN_CRIT "Interruption # %d\n", code); -#endif + /* HACK! jsm is going to fix this. + * entry.S will manage I-bit - only enable I-bit if it was + * enabled before we took the "trap". 
+ */ + if (code != 1) + local_irq_enable(); switch(code) { case 1: /* High-priority machine check (HPMC) */ + pdc_console_restart(); /* switch back to pdc if HPMC */ + + /* set up a new led state on systems shipped with a LED State panel */ + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC); + parisc_terminate("High Priority Machine Check (HPMC)", regs, code, 0); /* NOT REACHED */ @@ -506,6 +490,9 @@ case 5: /* Low-priority machine check */ + + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC); + flush_all_caches(); cpu_lpmc(5, regs); return; @@ -554,6 +541,7 @@ die_if_kernel("Privileged register usage", regs, code); si.si_code = ILL_PRVREG; + /* Fall thru */ give_sigill: si.si_signo = SIGILL; si.si_errno = 0; @@ -561,17 +549,47 @@ force_sig_info(SIGILL, &si, current); return; + case 12: + /* Overflow Trap, let the userland signal handler do the cleanup */ + si.si_signo = SIGFPE; + si.si_code = FPE_INTOVF; + si.si_addr = (void *) regs->iaoq[0]; + force_sig_info(SIGFPE, &si, current); + return; + + case 13: + /* Conditional Trap + The condition succees in an instruction which traps on condition */ + si.si_signo = SIGFPE; + /* Set to zero, and let the userspace app figure it out from + the insn pointed to by si_addr */ + si.si_code = 0; + si.si_addr = (void *) regs->iaoq[0]; + force_sig_info(SIGFPE, &si, current); + return; + case 14: /* Assist Exception Trap, i.e. floating point exception. */ die_if_kernel("Floating point exception", regs, 0); /* quiet */ handle_fpe(regs); return; + case 15: + /* Data TLB miss fault/Data page fault */ + /* Fall thru */ + case 16: + /* Non-access instruction TLB miss fault */ + /* The instruction TLB entry needed for the target address of the FIC + is absent, and hardware can't find it, so we get to cleanup */ + /* Fall thru */ case 17: /* Non-access data TLB miss fault/Non-access data page fault */ /* TODO: Still need to add slow path emulation code here */ + /* TODO: Understand what is meant by the TODO listed + above this one. (Carlos) */ fault_address = regs->ior; - parisc_terminate("Non access data tlb fault!",regs,code,fault_address); + fault_space = regs->isr; + break; case 18: /* PCXS only -- later cpu's split this into types 26,27 & 28 */ @@ -581,9 +599,8 @@ return; } /* Fall Through */ - - case 15: /* Data TLB miss fault/Data page fault */ - case 26: /* PCXL: Data memory access rights trap */ + case 26: + /* PCXL: Data memory access rights trap */ fault_address = regs->ior; fault_space = regs->isr; break; @@ -599,7 +616,6 @@ case 25: /* Taken branch trap */ -#ifndef CONFIG_KWDB regs->gr[0] &= ~PSW_T; if (regs->iasq[0]) handle_gdb_break(regs, TRAP_BRANCH); @@ -607,14 +623,6 @@ * run. */ return; -#else - /* Kernel debugger: */ - mtctl(0, 15); - pt_regs_to_ssp(regs, &ssp); - kgdb_trap(I_TAKEN_BR, &ssp, 1); - ssp_to_pt_regs(&ssp, regs); - break; -#endif /* CONFIG_KWDB */ case 7: /* Instruction access rights */ @@ -648,7 +656,6 @@ up_read(¤t->mm->mmap_sem); } /* Fall Through */ - case 27: /* Data memory protection ID trap */ die_if_kernel("Protection id trap", regs, code); @@ -682,6 +689,9 @@ force_sig_info(SIGBUS, &si, current); return; } + + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); + parisc_terminate("Unexpected interruption", regs, code, 0); /* NOT REACHED */ } @@ -710,24 +720,17 @@ * The kernel should never fault on its own address space. 
*/ - if (fault_space == 0) - parisc_terminate("Kernel Fault", regs, code, fault_address); + if (fault_space == 0) { + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); + parisc_terminate("Kernel Fault", regs, code, fault_address); + /** NOT REACHED **/ + } } -#ifdef CONFIG_KWDB - debug_call_leaf (); -#endif /* CONFIG_KWDB */ - do_page_fault(regs, code, fault_address); } -void show_trace_task(struct task_struct *tsk) -{ - BUG(); -} - - int __init check_ivt(void *iva) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/kernel/unaligned.c linux.21pre4-ac2/arch/parisc/kernel/unaligned.c --- linux.21pre4/arch/parisc/kernel/unaligned.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/kernel/unaligned.c 2003-01-06 15:38:40.000000000 +0000 @@ -1,4 +1,4 @@ -/* $Id: unaligned.c,v 1.9 2001/10/04 03:31:08 tausq Exp $ +/* $Id: unaligned.c,v 1.10 2002/09/22 02:21:05 tausq Exp $ * * Unaligned memory access handler * @@ -108,7 +108,7 @@ #define OPCODE_STW_L OPCODE4(0x1A) #define OPCODE_STW_L2 OPCODE4(0x1B) - +int unaligned_enabled = 1; void die_if_kernel (char *str, struct pt_regs *regs, long err); @@ -281,6 +281,9 @@ } } + if (!unaligned_enabled) + goto force_sigbus; + /* TODO: make this cleaner... */ switch (regs->iir & OPCODE1_MASK) { @@ -368,7 +371,7 @@ { printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); - +force_sigbus: /* couldn't handle it ... */ si.si_signo = SIGBUS; si.si_errno = 0; @@ -432,3 +435,4 @@ return (int)(regs->ior & align_mask); } + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/lib/locks.c linux.21pre4-ac2/arch/parisc/lib/locks.c --- linux.21pre4/arch/parisc/lib/locks.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/parisc/lib/locks.c 2003-01-06 15:38:40.000000000 +0000 @@ -0,0 +1,68 @@ +/* + * debugging spinlocks for parisc + * + * Adapted from the ppc version + */ + + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_DEBUG_SPINLOCK + +#undef INIT_STUCK +#define INIT_STUCK 200000000 /*0xffffffff*/ + +#define __spin_trylock(x) (__ldcw(&(x)->lock) != 0) + +void spin_lock(spinlock_t *lock) +{ + int cpu = smp_processor_id(); + unsigned int stuck = INIT_STUCK; + while (!__spin_trylock(lock)) { + while ((unsigned volatile long)lock->lock == 0) { + if (!--stuck) { + printk("spin_lock(%p) CPU#%d NIP %p" + " holder: cpu %ld pc %08lX\n", + lock, cpu, __builtin_return_address(0), + lock->owner_cpu,lock->owner_pc); + stuck = INIT_STUCK; + /* steal the lock */ + /*xchg_u32((void *)&lock->lock,0);*/ + } + } + } + lock->owner_pc = (unsigned long)__builtin_return_address(0); + lock->owner_cpu = cpu; +} + +int spin_trylock(spinlock_t *lock) +{ + if (!__spin_trylock(lock)) + return 0; + lock->owner_cpu = smp_processor_id(); + lock->owner_pc = (unsigned long)__builtin_return_address(0); + return 1; +} + +void spin_unlock(spinlock_t *lp) +{ + if ( lp->lock ) + printk("spin_unlock(%p): no lock cpu %d curr PC %p %s/%d\n", + lp, smp_processor_id(), __builtin_return_address(0), + current->comm, current->pid); + if ( lp->owner_cpu != smp_processor_id() ) + printk("spin_unlock(%p): cpu %d trying clear of cpu %d pc %lx val %x\n", + lp, smp_processor_id(), (int)lp->owner_cpu, + lp->owner_pc,lp->lock); + lp->owner_pc = lp->owner_cpu = 0; + wmb(); + lp->lock = 1; +} + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/lib/Makefile 
linux.21pre4-ac2/arch/parisc/lib/Makefile --- linux.21pre4/arch/parisc/lib/Makefile 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/lib/Makefile 2003-01-06 15:38:40.000000000 +0000 @@ -6,4 +6,6 @@ L_TARGET = lib.a obj-y := lusercopy.o bitops.o checksum.o io.o memset.o +obj-$(CONFIG_SMP) += locks.o + include $(TOPDIR)/Rules.make diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/Makefile linux.21pre4-ac2/arch/parisc/Makefile --- linux.21pre4/arch/parisc/Makefile 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/Makefile 2003-01-06 15:38:40.000000000 +0000 @@ -91,14 +91,6 @@ SUBDIRS := $(SUBDIRS) arch/parisc/math-emu DRIVERS := $(DRIVERS) arch/parisc/math-emu/math.o -ifdef CONFIG_KWDB -SUBDIRS := $(SUBDIRS) arch/parisc/kdb -DRIVERS := $(DRIVERS) arch/parisc/kdb/kdb.o - -arch/parisc/kdb: dummy - $(MAKE) linuxsubdirs SUBDIRS=arch/parisc/kdb -endif - arch/parisc/kernel: dummy $(MAKE) linuxsubdirs SUBDIRS=arch/parisc/kernel @@ -106,6 +98,21 @@ $(MAKE) linuxsubdirs SUBDIRS=arch/parisc/mm palo: vmlinux + @if [ $$(palo -f /dev/null >/dev/null 2>&1 ; echo $$?) != 2 ]; then \ + echo 'ERROR: Please install palo first (apt-get install palo)';\ + echo 'or build it from source and install it somewhere in your $$PATH';\ + false; \ + fi + @if [ ! -f ./palo.conf ]; then \ + cp arch/parisc/defpalo.conf palo.conf; \ + echo 'A generic palo config file (./palo.conf) has been created for you.'; \ + echo 'You should check it and re-run "make palo".'; \ + echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \ + false; \ + fi + palo -f ./palo.conf + +oldpalo: vmlinux export TOPDIR=`pwd`; \ unset STRIP LDFLAGS CPP CPPFLAGS AFLAGS CFLAGS CC LD; cd ../palo && make lifimage diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/mm/fault.c linux.21pre4-ac2/arch/parisc/mm/fault.c --- linux.21pre4/arch/parisc/mm/fault.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/mm/fault.c 2003-01-06 15:38:40.000000000 +0000 @@ -257,7 +257,8 @@ } parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); - + /* NOT REACHED! */ + out_of_memory: up_read(&mm->mmap_sem); printk(KERN_CRIT "VM: killing process %s\n", current->comm); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/mm/init.c linux.21pre4-ac2/arch/parisc/mm/init.c --- linux.21pre4/arch/parisc/mm/init.c 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/mm/init.c 2003-01-06 15:38:40.000000000 +0000 @@ -21,6 +21,7 @@ #include #include +#include mmu_gather_t mmu_gathers[NR_CPUS]; @@ -409,6 +410,9 @@ } printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10); + + /* set up a new led state on systems shipped LED State panel */ + pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); } /* @@ -574,13 +578,8 @@ for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) { pte_t pte; -#if !defined(CONFIG_KWDB) && !defined(CONFIG_STI_CONSOLE) +#if !defined(CONFIG_STI_CONSOLE) #warning STI console should explicitly allocate executable pages but does not -/* KWDB needs to write kernel text when setting break points. -** -** The right thing to do seems like KWDB modify only the pte which -** has a break point on it...otherwise we might mask worse bugs. -*/ /* * Map the fault vector writable so we can * write the HPMC checksum. 
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/vmlinux64.lds linux.21pre4-ac2/arch/parisc/vmlinux64.lds --- linux.21pre4/arch/parisc/vmlinux64.lds 2003-01-29 16:27:20.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/vmlinux64.lds 2003-01-06 15:38:40.000000000 +0000 @@ -15,12 +15,12 @@ *(.gnu.warning) } = 0 + _etext = .; /* End of text section */ + . = ALIGN(16); .rodata : { *(.rodata) } .kstrtab : { *(.kstrtab) } - _etext = .; /* End of text section */ - .data BLOCK(8192) : { /* Data without special */ data_start = .; *(.data) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/parisc/vmlinux.lds linux.21pre4-ac2/arch/parisc/vmlinux.lds --- linux.21pre4/arch/parisc/vmlinux.lds 2003-01-29 16:27:19.000000000 +0000 +++ linux.21pre4-ac2/arch/parisc/vmlinux.lds 2003-01-06 15:38:40.000000000 +0000 @@ -14,12 +14,12 @@ *(.gnu.warning) } = 0 + _etext = .; /* End of text section */ + . = ALIGN(16); .rodata : { *(.rodata) *(.rodata.*) } .kstrtab : { *(.kstrtab) } - _etext = .; /* End of text section */ - .data BLOCK(8192) : { /* Data without special */ data_start = .; *(.data) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/config.in linux.21pre4-ac2/arch/ppc/config.in --- linux.21pre4/arch/ppc/config.in 2003-01-29 16:27:11.000000000 +0000 +++ linux.21pre4-ac2/arch/ppc/config.in 2003-02-04 14:58:59.000000000 +0000 @@ -184,12 +184,9 @@ source drivers/parport/Config.in -if [ "$CONFIG_4xx" != "y" ]; then - if [ "$CONFIG_APUS" != "y" ]; then - tristate 'Support for /dev/rtc' CONFIG_PPC_RTC - else - bool 'Generic /dev/rtc emulation' CONFIG_GEN_RTC - fi +tristate 'Generic /dev/rtc emulation' CONFIG_GEN_RTC +if [ "$CONFIG_GEN_RTC" = "n" -a "$CONFIG_APUS" != "y" ]; then + tristate 'Support for /dev/rtc' CONFIG_PPC_RTC fi if [ "$CONFIG_ALL_PPC" = "y" -a "$CONFIG_POWER3" = "n" ] ; then diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/kernel/entry.S linux.21pre4-ac2/arch/ppc/kernel/entry.S --- linux.21pre4/arch/ppc/kernel/entry.S 2003-01-29 16:27:10.000000000 +0000 +++ linux.21pre4-ac2/arch/ppc/kernel/entry.S 2003-01-06 19:13:14.000000000 +0000 @@ -264,7 +264,9 @@ .globl ret_from_fork ret_from_fork: +#ifdef CONFIG_SMP bl schedule_tail +#endif lwz r0,TASK_PTRACE(r2) andi. 
r0,r0,PT_TRACESYS bnel- syscall_trace diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/kernel/idle.c linux.21pre4-ac2/arch/ppc/kernel/idle.c --- linux.21pre4/arch/ppc/kernel/idle.c 2003-01-29 16:27:10.000000000 +0000 +++ linux.21pre4-ac2/arch/ppc/kernel/idle.c 2003-01-06 19:13:14.000000000 +0000 @@ -51,9 +51,6 @@ do_power_save = 1; /* endless loop with no priority at all */ - current->nice = 20; - current->counter = -100; - init_idle(); for (;;) { #ifdef CONFIG_SMP if (!do_power_save) { @@ -73,11 +70,8 @@ if (do_power_save && !current->need_resched) power_save_6xx(); #endif /* CONFIG_6xx */ - - if (current->need_resched) { - schedule(); - check_pgt_cache(); - } + schedule(); + check_pgt_cache(); } return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/kernel/mk_defs.c linux.21pre4-ac2/arch/ppc/kernel/mk_defs.c --- linux.21pre4/arch/ppc/kernel/mk_defs.c 2003-01-29 16:27:10.000000000 +0000 +++ linux.21pre4-ac2/arch/ppc/kernel/mk_defs.c 2003-01-06 19:13:14.000000000 +0000 @@ -37,8 +37,7 @@ /*DEFINE(KERNELBASE, KERNELBASE);*/ DEFINE(STATE, offsetof(struct task_struct, state)); DEFINE(NEXT_TASK, offsetof(struct task_struct, next_task)); - DEFINE(COUNTER, offsetof(struct task_struct, counter)); - DEFINE(PROCESSOR, offsetof(struct task_struct, processor)); + DEFINE(PROCESSOR, offsetof(struct task_struct, cpu)); DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending)); DEFINE(THREAD, offsetof(struct task_struct, thread)); DEFINE(MM, offsetof(struct task_struct, mm)); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/kernel/ppc_defs.h linux.21pre4-ac2/arch/ppc/kernel/ppc_defs.h --- linux.21pre4/arch/ppc/kernel/ppc_defs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/arch/ppc/kernel/ppc_defs.h 2003-01-06 19:13:14.000000000 +0000 @@ -0,0 +1,81 @@ +/* + * WARNING! This file is automatically generated - DO NOT EDIT! 
+ */ +#define STATE 0 +#define NEXT_TASK 76 +#define PROCESSOR 32 +#define SIGPENDING 8 +#define THREAD 608 +#define MM 84 +#define ACTIVE_MM 88 +#define TASK_STRUCT_SIZE 1520 +#define KSP 0 +#define PGDIR 16 +#define LAST_SYSCALL 20 +#define PT_REGS 8 +#define PT_TRACESYS 2 +#define TASK_FLAGS 4 +#define TASK_PTRACE 24 +#define NEED_RESCHED 20 +#define THREAD_FPR0 24 +#define THREAD_FPSCR 284 +#define THREAD_VR0 288 +#define THREAD_VRSAVE 816 +#define THREAD_VSCR 800 +#define TASK_UNION_SIZE 8192 +#define STACK_FRAME_OVERHEAD 16 +#define INT_FRAME_SIZE 192 +#define GPR0 16 +#define GPR1 20 +#define GPR2 24 +#define GPR3 28 +#define GPR4 32 +#define GPR5 36 +#define GPR6 40 +#define GPR7 44 +#define GPR8 48 +#define GPR9 52 +#define GPR10 56 +#define GPR11 60 +#define GPR12 64 +#define GPR13 68 +#define GPR14 72 +#define GPR15 76 +#define GPR16 80 +#define GPR17 84 +#define GPR18 88 +#define GPR19 92 +#define GPR20 96 +#define GPR21 100 +#define GPR22 104 +#define GPR23 108 +#define GPR24 112 +#define GPR25 116 +#define GPR26 120 +#define GPR27 124 +#define GPR28 128 +#define GPR29 132 +#define GPR30 136 +#define GPR31 140 +#define _NIP 144 +#define _MSR 148 +#define _CTR 156 +#define _LINK 160 +#define _CCR 168 +#define _MQ 172 +#define _XER 164 +#define _DAR 180 +#define _DSISR 184 +#define _DEAR 180 +#define _ESR 184 +#define ORIG_GPR3 152 +#define RESULT 188 +#define TRAP 176 +#define CLONE_VM 256 +#define MM_PGD 12 +#define CPU_SPEC_ENTRY_SIZE 32 +#define CPU_SPEC_PVR_MASK 0 +#define CPU_SPEC_PVR_VALUE 4 +#define CPU_SPEC_FEATURES 12 +#define CPU_SPEC_SETUP 28 +#define NUM_USER_SEGMENTS 8 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/kernel/smp.c linux.21pre4-ac2/arch/ppc/kernel/smp.c --- linux.21pre4/arch/ppc/kernel/smp.c 2003-01-29 16:27:10.000000000 +0000 +++ linux.21pre4-ac2/arch/ppc/kernel/smp.c 2003-01-06 19:13:14.000000000 +0000 @@ -294,8 +294,6 @@ cpu_callin_map[0] = 1; current->processor = 0; - init_idle(); - for (i = 0; i < NR_CPUS; i++) { prof_counter[i] = 1; prof_multiplier[i] = 1; @@ -348,7 +346,8 @@ p = init_task.prev_task; if (!p) panic("No idle task for CPU %d", i); - del_from_runqueue(p); + init_idle(p, i); + unhash_process(p); init_tasks[i] = p; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/ppc/mm/fault.c linux.21pre4-ac2/arch/ppc/mm/fault.c --- linux.21pre4/arch/ppc/mm/fault.c 2003-01-29 16:27:10.000000000 +0000 +++ linux.21pre4-ac2/arch/ppc/mm/fault.c 2003-01-06 19:13:14.000000000 +0000 @@ -109,8 +109,6 @@ goto bad_area; if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, address)) goto bad_area; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/s390/kernel/entry.S linux.21pre4-ac2/arch/s390/kernel/entry.S --- linux.21pre4/arch/s390/kernel/entry.S 2003-01-29 16:27:16.000000000 +0000 +++ linux.21pre4-ac2/arch/s390/kernel/entry.S 2003-01-06 19:13:14.000000000 +0000 @@ -254,13 +254,14 @@ ret_from_fork: basr %r13,0 l %r13,.Lentry_base-.(%r13) # setup base pointer to &entry_base + # not saving R14 here because we go to sysc_return ultimately + l %r1,BASED(.Lschedtail) + basr %r14,%r1 # call schedule_tail (unlock stuff) GET_CURRENT # load pointer to task_struct to R9 stosm 24(%r15),0x03 # reenable interrupts sr %r0,%r0 # child returns 0 st %r0,SP_R2(%r15) # store return value (change R2 on stack) - l %r1,BASED(.Lschedtail) - la %r14,BASED(sysc_return) - br %r1 # call schedule_tail, return to 
sysc_return + b BASED(sysc_return) # # clone, fork, vfork, exec and sigreturn need glue, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/s390x/kernel/entry.S linux.21pre4-ac2/arch/s390x/kernel/entry.S --- linux.21pre4/arch/s390x/kernel/entry.S 2003-01-29 16:27:20.000000000 +0000 +++ linux.21pre4-ac2/arch/s390x/kernel/entry.S 2003-01-06 19:13:14.000000000 +0000 @@ -240,11 +240,11 @@ # .globl ret_from_fork ret_from_fork: + brasl %r14,schedule_tail GET_CURRENT # load pointer to task_struct to R9 stosm 48(%r15),0x03 # reenable interrupts xc SP_R2(8,%r15),SP_R2(%r15) # child returns 0 - larl %r14,sysc_return - jg schedule_tail # return to sysc_return + j sysc_return # # clone, fork, vfork, exec and sigreturn need glue, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/s390x/kernel/linux32.c linux.21pre4-ac2/arch/s390x/kernel/linux32.c --- linux.21pre4/arch/s390x/kernel/linux32.c 2003-01-29 16:27:20.000000000 +0000 +++ linux.21pre4-ac2/arch/s390x/kernel/linux32.c 2003-01-06 23:19:40.000000000 +0000 @@ -912,64 +912,97 @@ return sys32_fcntl(fd, cmd, arg); } -struct dqblk32 { - __u32 dqb_bhardlimit; - __u32 dqb_bsoftlimit; - __u32 dqb_curblocks; - __u32 dqb_ihardlimit; - __u32 dqb_isoftlimit; - __u32 dqb_curinodes; - __kernel_time_t32 dqb_btime; - __kernel_time_t32 dqb_itime; -}; - extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr); -asmlinkage int sys32_quotactl(int cmd, const char *special, int id, unsigned long addr) +#ifdef CONFIG_QIFACE_COMPAT +#ifdef CONFIG_QIFACE_V1 +struct user_dqblk32 { + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u32 dqb_curblocks; + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v1c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V1_GETQUOTA +#define Q_COMP_SETQUOTA Q_V1_SETQUOTA +#define Q_COMP_SETQLIM Q_V1_SETQLIM +#define Q_COMP_SETUSE Q_V1_SETUSE +#else +struct user_dqblk32 { + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u64 dqb_curspace; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v2c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V2_GETQUOTA +#define Q_COMP_SETQUOTA Q_V2_SETQUOTA +#define Q_COMP_SETQLIM Q_V2_SETQLIM +#define Q_COMP_SETUSE Q_V2_SETUSE +#endif + +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) { int cmds = cmd >> SUBCMDSHIFT; int err; - struct dqblk d; + comp_dqblk_t d; mm_segment_t old_fs; char *spec; switch (cmds) { - case Q_GETQUOTA: - break; - case Q_SETQUOTA: - case Q_SETUSE: - case Q_SETQLIM: - if (copy_from_user (&d, (struct dqblk32 *)addr, - sizeof (struct dqblk32))) - return -EFAULT; - d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; - d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; - break; + case Q_COMP_GETQUOTA: + break; + case Q_COMP_SETQUOTA: + case Q_COMP_SETUSE: + case Q_COMP_SETQLIM: + if (copy_from_user(&d, (struct user_dqblk32 *)addr, + sizeof (struct user_dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime; + break; default: - return sys_quotactl(cmd, special, - id, (caddr_t)addr); + return sys_quotactl(cmd, special, id, (__kernel_caddr_t)addr); } spec = getname (special); err = PTR_ERR(spec); if (IS_ERR(spec)) return err; - old_fs = get_fs (); + old_fs = get_fs(); set_fs 
(KERNEL_DS); - err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); + err = sys_quotactl(cmd, (const char *)spec, id, (__kernel_caddr_t)&d); set_fs (old_fs); putname (spec); if (err) return err; - if (cmds == Q_GETQUOTA) { + if (cmds == Q_COMP_GETQUOTA) { __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; - ((struct dqblk32 *)&d)->dqb_itime = i; - ((struct dqblk32 *)&d)->dqb_btime = b; - if (copy_to_user ((struct dqblk32 *)addr, &d, - sizeof (struct dqblk32))) + ((struct user_dqblk32 *)&d)->dqb_itime = i; + ((struct user_dqblk32 *)&d)->dqb_btime = b; + if (copy_to_user ((struct user_dqblk32 *)addr, &d, + sizeof (struct user_dqblk32))) return -EFAULT; } return 0; } +#else +/* No conversion needed for new interface */ +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) +{ + return sys_quotactl(cmd, special, id, addr); +} +#endif + static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf) { int err; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sh/mm/fault.c linux.21pre4-ac2/arch/sh/mm/fault.c --- linux.21pre4/arch/sh/mm/fault.c 2003-01-29 16:27:15.000000000 +0000 +++ linux.21pre4-ac2/arch/sh/mm/fault.c 2003-01-06 19:13:27.000000000 +0000 @@ -72,8 +72,6 @@ return 1; check_stack: - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (expand_stack(vma, start) == 0) goto good_area; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sparc/kernel/sunos_ioctl.c linux.21pre4-ac2/arch/sparc/kernel/sunos_ioctl.c --- linux.21pre4/arch/sparc/kernel/sunos_ioctl.c 2003-01-29 16:27:06.000000000 +0000 +++ linux.21pre4-ac2/arch/sparc/kernel/sunos_ioctl.c 2003-01-06 18:20:22.000000000 +0000 @@ -39,8 +39,12 @@ { int ret = -EBADF; - if (fd >= SUNOS_NR_OPEN || !fcheck(fd)) + read_lock(¤t->files->file_lock); + if (fd >= SUNOS_NR_OPEN || !fcheck(fd)) { + read_unlock(¤t->files->file_lock); goto out; + } + read_unlock(¤t->files->file_lock); /* First handle an easy compat. case for tty ldisc. 
*/ if(cmd == TIOCSETD) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sparc/mm/fault.c linux.21pre4-ac2/arch/sparc/mm/fault.c --- linux.21pre4/arch/sparc/mm/fault.c 2003-01-29 16:27:06.000000000 +0000 +++ linux.21pre4-ac2/arch/sparc/mm/fault.c 2003-01-06 19:13:27.000000000 +0000 @@ -249,8 +249,6 @@ goto bad_area; if(vma->vm_start <= address) goto good_area; - if(!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if(expand_stack(vma, address)) goto bad_area; /* @@ -496,8 +494,6 @@ goto bad_area; if(vma->vm_start <= address) goto good_area; - if(!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if(expand_stack(vma, address)) goto bad_area; good_area: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sparc64/kernel/sunos_ioctl32.c linux.21pre4-ac2/arch/sparc64/kernel/sunos_ioctl32.c --- linux.21pre4/arch/sparc64/kernel/sunos_ioctl32.c 2003-01-29 16:27:14.000000000 +0000 +++ linux.21pre4-ac2/arch/sparc64/kernel/sunos_ioctl32.c 2003-01-06 18:20:34.000000000 +0000 @@ -100,8 +100,12 @@ if(fd >= SUNOS_NR_OPEN) goto out; - if(!fcheck(fd)) + read_lock(¤t->files->file_lock); + if(!fcheck(fd)) { + read_unlock(¤t->files->file_lock); goto out; + } + read_unlock(¤t->files->file_lock); if(cmd == TIOCSETD) { mm_segment_t old_fs = get_fs(); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sparc64/kernel/sys_sparc32.c linux.21pre4-ac2/arch/sparc64/kernel/sys_sparc32.c --- linux.21pre4/arch/sparc64/kernel/sys_sparc32.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/arch/sparc64/kernel/sys_sparc32.c 2003-01-29 17:13:26.000000000 +0000 @@ -889,62 +889,97 @@ return sys32_fcntl(fd, cmd, arg); } -struct dqblk32 { - __u32 dqb_bhardlimit; - __u32 dqb_bsoftlimit; - __u32 dqb_curblocks; - __u32 dqb_ihardlimit; - __u32 dqb_isoftlimit; - __u32 dqb_curinodes; - __kernel_time_t32 dqb_btime; - __kernel_time_t32 dqb_itime; -}; - extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr); -asmlinkage int sys32_quotactl(int cmd, const char *special, int id, unsigned long addr) +#ifdef CONFIG_QIFACE_COMPAT +#ifdef CONFIG_QIFACE_V1 +struct user_dqblk32 { + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u32 dqb_curblocks; + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v1c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V1_GETQUOTA +#define Q_COMP_SETQUOTA Q_V1_SETQUOTA +#define Q_COMP_SETQLIM Q_V1_SETQLIM +#define Q_COMP_SETUSE Q_V1_SETUSE +#else +struct user_dqblk32 { + __u32 dqb_ihardlimit; + __u32 dqb_isoftlimit; + __u32 dqb_curinodes; + __u32 dqb_bhardlimit; + __u32 dqb_bsoftlimit; + __u64 dqb_curspace; + __kernel_time_t32 dqb_btime; + __kernel_time_t32 dqb_itime; +}; +typedef struct v2c_mem_dqblk comp_dqblk_t; + +#define Q_COMP_GETQUOTA Q_V2_GETQUOTA +#define Q_COMP_SETQUOTA Q_V2_SETQUOTA +#define Q_COMP_SETQLIM Q_V2_SETQLIM +#define Q_COMP_SETUSE Q_V2_SETUSE +#endif + +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) { int cmds = cmd >> SUBCMDSHIFT; int err; - struct dqblk d; + comp_dqblk_t d; mm_segment_t old_fs; char *spec; switch (cmds) { - case Q_GETQUOTA: - break; - case Q_SETQUOTA: - case Q_SETUSE: - case Q_SETQLIM: - if (copy_from_user (&d, (struct dqblk32 *)addr, - sizeof (struct dqblk32))) - return -EFAULT; - d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime; - d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime; - break; + case 
Q_COMP_GETQUOTA: + break; + case Q_COMP_SETQUOTA: + case Q_COMP_SETUSE: + case Q_COMP_SETQLIM: + if (copy_from_user(&d, (struct user_dqblk32 *)addr, + sizeof (struct user_dqblk32))) + return -EFAULT; + d.dqb_itime = ((struct user_dqblk32 *)&d)->dqb_itime; + d.dqb_btime = ((struct user_dqblk32 *)&d)->dqb_btime; + break; default: - return sys_quotactl(cmd, special, - id, (caddr_t)addr); + return sys_quotactl(cmd, special, id, (__kernel_caddr_t)addr); } spec = getname (special); err = PTR_ERR(spec); if (IS_ERR(spec)) return err; - old_fs = get_fs (); + old_fs = get_fs(); set_fs (KERNEL_DS); - err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d); + err = sys_quotactl(cmd, (const char *)spec, id, (__kernel_caddr_t)&d); set_fs (old_fs); putname (spec); - if (cmds == Q_GETQUOTA) { + if (err) + return err; + if (cmds == Q_COMP_GETQUOTA) { __kernel_time_t b = d.dqb_btime, i = d.dqb_itime; - ((struct dqblk32 *)&d)->dqb_itime = i; - ((struct dqblk32 *)&d)->dqb_btime = b; - if (copy_to_user ((struct dqblk32 *)addr, &d, - sizeof (struct dqblk32))) + ((struct user_dqblk32 *)&d)->dqb_itime = i; + ((struct user_dqblk32 *)&d)->dqb_btime = b; + if (copy_to_user ((struct user_dqblk32 *)addr, &d, + sizeof (struct user_dqblk32))) return -EFAULT; } - return err; + return 0; } +#else +/* No conversion needed for new interface */ +asmlinkage int sys32_quotactl(int cmd, const char *special, int id, caddr_t addr) +{ + return sys_quotactl(cmd, special, id, addr); +} +#endif + static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf) { int err; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sparc64/mm/fault.c linux.21pre4-ac2/arch/sparc64/mm/fault.c --- linux.21pre4/arch/sparc64/mm/fault.c 2003-01-29 16:27:14.000000000 +0000 +++ linux.21pre4-ac2/arch/sparc64/mm/fault.c 2003-01-06 19:13:39.000000000 +0000 @@ -373,8 +373,6 @@ if (vma->vm_start <= address) goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; if (!(fault_code & FAULT_CODE_WRITE)) { /* Non-faulting loads shouldn't expand stack. 
*/ insn = get_fault_insn(regs, insn); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/sparc64/solaris/timod.c linux.21pre4-ac2/arch/sparc64/solaris/timod.c --- linux.21pre4/arch/sparc64/solaris/timod.c 2003-01-29 16:27:14.000000000 +0000 +++ linux.21pre4-ac2/arch/sparc64/solaris/timod.c 2003-01-06 18:20:56.000000000 +0000 @@ -149,7 +149,9 @@ struct socket *sock; SOLD("wakeing socket"); + read_lock(¤t->files->file_lock); sock = ¤t->files->fd[fd]->f_dentry->d_inode->u.socket_i; + read_unlock(¤t->files->file_lock); wake_up_interruptible(&sock->wait); read_lock(&sock->sk->callback_lock); if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) @@ -163,7 +165,9 @@ struct sol_socket_struct *sock; SOLD("queuing primsg"); + read_lock(¤t->files->file_lock); sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data; + read_unlock(¤t->files->file_lock); it->next = sock->pfirst; sock->pfirst = it; if (!sock->plast) @@ -177,7 +181,9 @@ struct sol_socket_struct *sock; SOLD("queuing primsg at end"); + read_lock(¤t->files->file_lock); sock = (struct sol_socket_struct *)current->files->fd[fd]->private_data; + read_unlock(¤t->files->file_lock); it->next = NULL; if (sock->plast) sock->plast->next = it; @@ -355,7 +361,11 @@ (int (*)(int, unsigned long *))SYS(socketcall); int (*sys_sendto)(int, void *, size_t, unsigned, struct sockaddr *, int) = (int (*)(int, void *, size_t, unsigned, struct sockaddr *, int))SYS(sendto); - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); + if (!filp) + return -EBADF; ino = filp->f_dentry->d_inode; sock = (struct sol_socket_struct *)filp->private_data; SOLD("entry"); @@ -636,7 +646,11 @@ SOLD("entry"); SOLDD(("%u %p %d %p %p %d %p %d\n", fd, ctl_buf, ctl_maxlen, ctl_len, data_buf, data_maxlen, data_len, *flags_p)); - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); + if (!filp) + return -EBADF; ino = filp->f_dentry->d_inode; sock = (struct sol_socket_struct *)filp->private_data; SOLDD(("%p %p\n", sock->pfirst, sock->pfirst ? sock->pfirst->next : NULL)); @@ -847,7 +861,9 @@ lock_kernel(); if(fd >= NR_OPEN) goto out; - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); if(!filp) goto out; ino = filp->f_dentry->d_inode; @@ -914,7 +930,9 @@ lock_kernel(); if(fd >= NR_OPEN) goto out; - filp = current->files->fd[fd]; + read_lock(¤t->files->file_lock); + filp = fcheck(fd); + read_unlock(¤t->files->file_lock); if(!filp) goto out; ino = filp->f_dentry->d_inode; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/arch/x86_64/boot/bootsect.S linux.21pre4-ac2/arch/x86_64/boot/bootsect.S --- linux.21pre4/arch/x86_64/boot/bootsect.S 2003-01-29 16:27:20.000000000 +0000 +++ linux.21pre4-ac2/arch/x86_64/boot/bootsect.S 2003-01-30 16:50:05.000000000 +0000 @@ -395,9 +395,15 @@ # NOTE: Doesn't save %ax or %dx; do it yourself if you need to. kill_motor: +#if 1 + xorw %ax, %ax # reset FDC + xorb %dl, %dl + int $0x13 +#else movw $0x3f2, %dx xorb %al, %al outb %al, %dx +#endif ret sectors: .word 0 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/CREDITS linux.21pre4-ac2/CREDITS --- linux.21pre4/CREDITS 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/CREDITS 2003-01-30 16:55:15.000000000 +0000 @@ -2218,11 +2218,12 @@ S: United Kingdom N: Ian S. 
Nelson -E: ian.nelson@echostar.com +E: nelsonis@earthlink.net D: Minor mmap and ide hacks S: 1370 Atlantis Ave. S: Lafayette CO, 80026 S: USA +P: 1024D/DD478240 4073 9AF8 027F 278D CBBB DA3C 9905 0415 DD47 8240 N: Russell Nelson E: nelson@crynwr.com @@ -2405,13 +2406,10 @@ D: CDROM driver "sonycd535" (Sony CDU-535/531) N: Stelian Pop -E: stelian.pop@fr.alcove.com +E: stelian@popies.net P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0 D3F7 7185 9E7A EDBB 6147 D: sonypi, meye drivers, mct_u232 usb serial hacks -S: Alcôve -S: 153, bd. Anatole France -S: 93200 Saint Denis -S: France +S: Paris, France N: Frederic Potter E: fpotter@cirpack.com @@ -2549,6 +2547,12 @@ S: 7000 Stuttgart 50 S: Germany +N: Andrew Rodland +E: arodland@linuxguru.net +D: That crazy morse code thing. +P: D2B1 5215 B1B9 18E0 B6AD 6ADD 4373 165F 1770 BD5C +S: Pennsylvania, USA + N: Christoph Rohland E: hans-christoph.rohland@sap.com E: ch.rohland@gmx.net diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/00-INDEX linux.21pre4-ac2/Documentation/00-INDEX --- linux.21pre4/Documentation/00-INDEX 2003-01-29 16:27:24.000000000 +0000 +++ linux.21pre4-ac2/Documentation/00-INDEX 2003-01-06 16:20:24.000000000 +0000 @@ -52,6 +52,8 @@ - directory with information on the CD-ROM drivers that Linux has. computone.txt - info on Computone Intelliport II/Plus Multiport Serial Driver +cpufreq + - describes the CPU frequency and voltage scaling support cpqarray.txt - info on using Compaq's SMART2 Intelligent Disk Array Controllers. devices.txt diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/Configure.help linux.21pre4-ac2/Documentation/Configure.help --- linux.21pre4/Documentation/Configure.help 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/Documentation/Configure.help 2003-01-29 17:29:41.000000000 +0000 @@ -464,8 +464,14 @@ The initial RAM disk is a RAM disk that is loaded by the boot loader (loadlin or lilo) and that is mounted as root before the normal boot procedure. It is typically used to load modules needed to mount the - "real" root file system, etc. See - for details. + "real" root file system, etc. + + Due to a problem elsewhere in the kernel, initial RAM disks _must_ + have the file system on them created with a 1024 byte block size. + If any other value is used, the kernel will be unable to mount the + RAM disk at boot time, causing a kernel panic. + + See for details. Loopback device support CONFIG_BLK_DEV_LOOP @@ -1041,7 +1047,7 @@ Pacific Digital A-DMA support (EXPERIMENTAL) CONFIG_BLK_DEV_PDC_ADMA - Please read the comments at the top of . + Please read the comments at the top of . 3ware Hardware ATA-RAID support CONFIG_BLK_DEV_3W_XXXX_RAID @@ -1071,13 +1077,13 @@ The ATP860 is an UltraDMA 66 chipset base. The ATP860M(acintosh) version is an UltraDMA 66 chipset base. - Please read the comments at the top of . + Please read the comments at the top of . If you say Y here, then say Y to "Use DMA by default when available" as well. AEC62XX Tuning support CONFIG_AEC62XX_TUNING - Please read the comments at the top of . + Please read the comments at the top of . If unsure, say N. ALI M15x3 chipset support @@ -1088,7 +1094,7 @@ If you say Y here, you also need to say Y to "Use DMA by default when available", above. Please read the comments at the top of - . + . If unsure, say N. @@ -1112,7 +1118,7 @@ If you say Y here, you also need to say Y to "Use DMA by default when available", above. - Please read the comments at the top of . 
+ Please read the comments at the top of . If unsure, say N. @@ -1156,16 +1162,18 @@ HPT34X AUTODMA support (WIP) CONFIG_HPT34X_AUTODMA This is a dangerous thing to attempt currently! Please read the - comments at the top of . If you say Y + comments at the top of . If you say Y here, then say Y to "Use DMA by default when available" as well. If unsure, say N. -HPT366/368/370 chipset support +HPT36X/37X chipset support CONFIG_BLK_DEV_HPT366 HPT366 is an Ultra DMA chipset for ATA-66. HPT368 is an Ultra DMA chipset for ATA-66 RAID Based. HPT370 is an Ultra DMA chipset for ATA-100. + HPT372 is an Ultra DMA chipset for ATA-133. + HPT374 is an Ultra DMA chipset for ATA-133. This driver adds up to 4 more EIDE devices sharing a single interrupt. @@ -1187,12 +1195,12 @@ This driver adds detection and support for the NS87415 chip (used in SPARC64, among others). - Please read the comments at the top of . + Please read the comments at the top of . OPTi 82C621 chipset enhanced support (EXPERIMENTAL) CONFIG_BLK_DEV_OPTI621 This is a driver for the OPTi 82C621 EIDE controller. - Please read the comments at the top of . + Please read the comments at the top of . ServerWorks OSB4/CSB5 chipset support CONFIG_BLK_DEV_SVWKS @@ -1206,7 +1214,7 @@ PIO 0-4 mode settings, this allows dynamic tuning of the chipset via the standard end-user tool 'hdparm'. - Please read the comments at the top of . + Please read the comments at the top of . If you say Y here, you should also say Y to "PIIXn Tuning support", below. @@ -1226,7 +1234,7 @@ If unsure, say N. PROMISE PDC20246/PDC20262/PDC20265/PDC20267/PDC20268 support -CONFIG_BLK_DEV_PDC202XX +CONFIG_BLK_DEV_PDC202XX_OLD Promise Ultra33 or PDC20246 Promise Ultra66 or PDC20262 Promise Ultra100 or PDC20265/PDC20267/PDC20268 @@ -1244,7 +1252,7 @@ available" as well. Please read the comments at the top of - . + . If unsure, say N. @@ -1259,7 +1267,7 @@ when the PDC20265 BIOS has been disabled (for faster boot up). Please read the comments at the top of - . + . If unsure, say N. @@ -1282,7 +1290,7 @@ If you say Y here, you need to say Y to "Use DMA by default when available" as well. - Please read the comments at the top of . + Please read the comments at the top of . Silicon Image chipset support CONFIG_BLK_DEV_SIIMAGE @@ -1301,7 +1309,7 @@ available" as well. Please read the comments at the top of - . + . Winbond SL82c105 support CONFIG_BLK_DEV_SL82C105 @@ -1314,7 +1322,7 @@ This driver adds support for bus master DMA transfers using the Tekram TRM290 PCI IDE chip. Volunteers are needed for further tweaking and development. - Please read the comments at the top of . + Please read the comments at the top of . VIA82CXXX chipset support CONFIG_BLK_DEV_VIA82CXXX @@ -1326,7 +1334,7 @@ system" support. Please read the comments at the top of - . + . If you say Y here, then say Y to "Use DMA by default when available" as well. @@ -1366,7 +1374,7 @@ boot parameter. It enables support for the secondary IDE interface of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster I/O speeds to be set as well. See the files - and for + and for more info. DTC-2278 support @@ -1375,7 +1383,7 @@ boot parameter. It enables support for the secondary IDE interface of the DTC-2278 card, and permits faster I/O speeds to be set as well. See the and - files for more info. + files for more info. Holtek HT6560B support CONFIG_BLK_DEV_HT6560B @@ -1383,7 +1391,7 @@ boot parameter. 
It enables support for the secondary IDE interface of the Holtek card, and permits faster I/O speeds to be set as well. See the and - files for more info. + files for more info. PROMISE DC4030 support (EXPERIMENTAL) CONFIG_BLK_DEV_PDC4030 @@ -1393,14 +1401,14 @@ attached to the secondary interface. CD-ROM and TAPE devices are not supported yet. This driver is enabled at runtime using the "ide0=dc4030" kernel boot parameter. See the - and files + and files for more info. QDI QD65XX support CONFIG_BLK_DEV_QD65XX This driver is enabled at runtime using the "ide0=qd65xx" kernel boot parameter. It permits faster I/O speeds to be set. See the - and for + and for more info. UMC 8672 support @@ -1409,7 +1417,7 @@ boot parameter. It enables support for the secondary IDE interface of the UMC-8672, and permits faster I/O speeds to be set as well. See the files and - for more info. + for more info. Amiga Gayle IDE interface support CONFIG_BLK_DEV_GAYLE @@ -1835,6 +1843,20 @@ want), say M here and read . The module will be called lvm-mod.o. +Device-mapper support +CONFIG_BLK_DEV_DM + Device-mapper is a low level volume manager. It works by allowing + people to specify mappings for ranges of logical sectors. Various + mapping types are available, in addition people may write their own + modules containing custom mappings if they wish. + + Higher level volume managers such as LVM2 use this driver. + + If you want to compile this as a module, say M here and read + . The module will be called dm-mod.o. + + If unsure, say N. + Multiple devices driver support (RAID and LVM) CONFIG_MD Support multiple physical spindles through a single logical device. @@ -4583,14 +4605,11 @@ BIOS routines contained in a ROM chip in HP PA-RISC based machines. Enabling this option will implement the linux framebuffer device and an fbcon color text console using calls to the STI BIOS routines. - The HP framebuffer device is usually planar, uses a strange memory + The HP framebuffer device is sometimes planar, using a strange memory layout, and changing the plane mask to create colored pixels - requires a call to the STI routines, so do not expect /dev/fb to - actually be useful. However, it is the best we have as far as - graphics on the HP chipsets due to lack of hardware level - documentation for the various on-board HP chipsets used in these - systems. It is sufficient for basic text console functions, - including fonts. + can require a call to the STI routines, so /dev/fb may not actually + be useful. However, on some systems packed pixel formats are supported. + It is sufficient for basic text console functions, including fonts. You should probably enable this option, unless you are having trouble getting video when booting the kernel (make sure it isn't @@ -4728,12 +4747,11 @@ messages. Most people will want to say N here. If unsure, you will also want to say N. -Matrox unified accelerated driver CONFIG_FB_MATROX - Say Y here if you have a Matrox Millennium, Millennium II, Mystique, - Mystique 220, Productiva G100, Mystique G200, Millennium G200, - Matrox G400, G450 or G550 card in your box. At this time, support for - the G-series digital output is almost non-existant. + Say Y here if you have a Matrox Millennium, Matrox Millennium II, + Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox + Mystique G200, Matrox Millennium G200, Matrox Marvel G200 video, + Matrox G400, G450 or G550 card in your box. 
This driver is also available as a module ( = code which can be inserted and removed from the running kernel whenever you want). @@ -4744,7 +4762,6 @@ module load time. The parameters look like "video=matrox:XXX", and are described in . -Matrox Millennium I/II support CONFIG_FB_MATROX_MILLENIUM Say Y here if you have a Matrox Millennium or Matrox Millennium II video card. If you select "Advanced lowlevel driver options" below, @@ -4752,7 +4769,6 @@ packed pixel, 24 bpp packed pixel and 32 bpp packed pixel. You can also use font widths different from 8. -Matrox Mystique support CONFIG_FB_MATROX_MYSTIQUE Say Y here if you have a Matrox Mystique or Matrox Mystique 220 video card. If you select "Advanced lowlevel driver options" below, @@ -4760,27 +4776,46 @@ packed pixel and 32 bpp packed pixel. You can also use font widths different from 8. -Matrox G100/G200/G400/G450/G550 support -CONFIG_FB_MATROX_G100 - Say Y here if you have a Matrox G100, G200, G400, G450, or G550 - based video card. If you select "Advanced lowlevel driver options", - you should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp - packed pixel and 32 bpp packed pixel. You can also use font widths +CONFIG_FB_MATROX_G450 + Say Y here if you have a Matrox G100, G200, G400, G450 or G550 based + video card. If you select "Advanced lowlevel driver options", you + should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp packed + pixel and 32 bpp packed pixel. You can also use font widths different from 8. If you need support for G400 secondary head, you must first say Y to "I2C support" and "I2C bit-banging support" in the character devices section, and then to "Matrox I2C support" and "G400 second head - support" here in the framebuffer section. + support" here in the framebuffer section. G450/G550 secondary head + and digital output are supported without additional modules. - If you have G550, you must also compile support for G450/G550 secondary - head into kernel, otherwise picture will be shown only on the output you - are probably not using... + The driver starts in monitor mode. You must use the matroxset tool + (available at ) to + swap primary and secondary head outputs, or to change output mode. + Secondary head driver always start in 640x480 resolution and you + must use fbset to change it. - If you need support for G450 or G550 secondary head, say Y to - "Matrox G450/G550 second head support" below. + Do not forget that second head supports only 16 and 32 bpp + packed pixels, so it is a good idea to compile them into the kernel + too. You can use only some font widths, as the driver uses generic + painting procedures (the secondary head does not use acceleration + engine). + + G450/G550 hardware can display TV picture only from secondary CRTC, + and it performs no scaling, so picture must have 525 or 625 lines. + +CONFIG_FB_MATROX_G100A + Say Y here if you have a Matrox G100, G200 or G400 based + video card. If you select "Advanced lowlevel driver options", you + should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp packed + pixel and 32 bpp packed pixel. You can also use font widths + different from 8. + + If you need support for G400 secondary head, you must first say Y to + "I2C support" and "I2C bit-banging support" in the character devices + section, and then to "Matrox I2C support" and "G400 second head + support" here in the framebuffer section. 
-Matrox I2C support CONFIG_FB_MATROX_I2C This drivers creates I2C buses which are needed for accessing the DDC (I2C) bus present on all Matroxes, an I2C bus which @@ -4794,7 +4829,6 @@ If you compile it as module, it will create a module named i2c-matroxfb.o. -Matrox G400 second head support CONFIG_FB_MATROX_MAVEN WARNING !!! This support does not work with G450 !!! @@ -4823,32 +4857,14 @@ painting procedures (the secondary head does not use acceleration engine). -Matrox G450 second head support -CONFIG_FB_MATROX_G450 - Say Y or M here if you want to use a secondary head (meaning two - monitors in parallel) on G450, or if you are using analog output - of G550. - - If you compile it as module, two modules are created, - matroxfb_crtc2.o and matroxfb_g450.o. Both modules are needed if you - want two independent display devices. - - The driver starts in monitor mode and currently does not support - output in TV modes. You must use the matroxset tool (available - at ) to swap - primary and secondary head outputs. Secondary head driver always - start in 640x480 resolution and you must use fbset to change it. - - Note on most G550 cards the analog output is the secondary head, - so you will need to say Y here to use it. - - Also do not forget that second head supports only 16 and 32 bpp - packed pixels, so it is a good idea to compile them into the kernel - too. You can use only some font widths, as the driver uses generic - painting procedures (the secondary head does not use acceleration - engine). - -Matrox unified driver multihead support +CONFIG_FB_MATROX_PROC + Say Y or M here if you want to access some informations about driver + state through /proc interface. + + You should download matrox_pins tool (available at + ) to get human + readable output. + CONFIG_FB_MATROX_MULTIHEAD Say Y here if you have more than one (supported) Matrox device in your computer and you want to use all of them for different monitors @@ -5294,6 +5310,19 @@ replacement for kerneld.) Say Y here and read about configuring it in . +Kernel .config file saved in kernel image +CONFIG_IKCONFIG + This option enables the complete Linux kernel ".config" file contents + to be saved in the kernel (zipped) image file. It provides + documentation of which kernel options are used in a running kernel or + in an on-disk kernel. It can be extracted from the kernel image file + with a script and used as input to rebuild the current kernel or to + build another kernel. Since the kernel image is zipped, using this + option adds approximately 8 KB to a kernel image file. + This option is not available as a module. If you want a separate + file to save the kernel's .config contents, use 'installkernel' or 'cp' + or a similar tool, or just save it in '/lib/modules/'. + ARP daemon support CONFIG_ARPD Normally, the kernel maintains an internal cache which maps IP @@ -9117,7 +9146,7 @@ Aironet 4500/4800 I365 broken support CONFIG_AIRONET4500_I365 If you have a PCMCIA Aironet 4500/4800 card which you want to use - without the standard PCMCIA cardservices provided by the pcmcia-cs + without the standard PCMCIA card services provided by the pcmcia-cs package, say Y here. This is not recommended, so say N. Aironet 4500/4800 PCMCIA support @@ -10524,6 +10553,15 @@ If unsure, say N here. +Raw HDLC Ethernet device support +CONFIG_HDLC_RAW_ETH + Say Y to this option if you want generic HDLC driver to support + raw HDLC Ethernet device emulation over WAN (Wide Area Network) + connections. + You will need it for Ethernet over HDLC bridges. 
+ + If unsure, say N here. + Cisco HDLC support CONFIG_HDLC_CISCO Say Y to this option if you want generic HDLC driver to support @@ -10538,13 +10576,6 @@ If unsure, say N here. -Frame-Relay bridging support -CONFIG_HDLC_FR_BRIDGE - Say Y to this option if you want generic HDLC driver to support - bridging LAN frames over Frame-Relay links. - - If unsure, say N here. - Synchronous Point-to-Point Protocol (PPP) support CONFIG_HDLC_PPP Say Y to this option if you want generic HDLC driver to support @@ -10915,31 +10946,41 @@ The safe and default value for this is N. -SysKonnect SK-98xx support +SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter family support CONFIG_SK98LIN - Say Y here if you have a SysKonnect SK-98xx Gigabit Ethernet Server - Adapter. The following adapters are supported by this driver: - - SK-9841 (single link 1000Base-LX) - - SK-9842 (dual link 1000Base-LX) - - SK-9843 (single link 1000Base-SX) - - SK-9844 (dual link 1000Base-SX) - - SK-9821 (single link 1000Base-T) - - SK-9822 (dual link 1000Base-T) - - SK-9861 (single link Volition connector) - - SK-9862 (dual link Volition connector) - The driver also supports the following adapters from Allied Telesyn: - - AT2970... - - The dual link adapters support a link-failover feature. Read - for information about + Say Y here if you have a SysKonnect SK-98xx or SK-95xx Gigabit + Ethernet Server Adapter. The following adapters are supported by + this driver: + - SK-9521 10/100/1000Base-T Adapter + - SK-9821 Gigabit Ethernet 1000Base-T Server Adapter + - SK-9822 Gigabit Ethernet 1000Base-T Dual Port Server Adapter + - SK-9841 Gigabit Ethernet 1000Base-LX Server Adapter + - SK-9842 Gigabit Ethernet 1000Base-LX Dual Port Server Adapter + - SK-9843 Gigabit Ethernet 1000Base-SX Server Adapter + - SK-9844 Gigabit Ethernet 1000Base-SX Dual Port Server Adapter + - SK-9861 Gigabit Ethernet 1000Base-SX Server Adapter + - SK-9862 Gigabit Ethernet 1000Base-SX Dual Port Server Adapter + - SK-9871 Gigabit Ethernet 1000Base-ZX Server Adapter + - SK-9872 Gigabit Ethernet 1000Base-ZX Dual Port Server Adapter + - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter + - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter + - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter + - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter + + The adapters support Jumbo Frames. + The dual link adapters support link-failover and dual port features. + The V2.0 adapters support the scatter-gather functionality with + sendfile(). Read Documentation/networking/sk98lin.txt for information about optional driver parameters. Questions concerning this driver may be addressed to: linux@syskonnect.de If you want to compile this driver as a module ( = code which can be inserted in and removed from the running kernel whenever you want), - say M here and read . This is - recommended. The module will be called sk98lin.o. + say M here and read Documentation/modules.txt. This is recommended. + The module will be called sk98lin.o. Sun GEM support CONFIG_SUNGEM @@ -12699,12 +12740,44 @@ Quota support CONFIG_QUOTA If you say Y here, you will be able to set per user limits for disk - usage (also called disk quotas). Currently, it works only for the - ext2 file system. You need additional software in order to use quota - support; for details, read the Quota mini-HOWTO, available from + usage (also called disk quotas). 
Currently, it works for the + ext2, ext3, and reiserfs file system. You need additional software + in order to use quota support (you can download sources from + ). For further details, read + the Quota mini-HOWTO, available from . Probably the quota support is only useful for multi user systems. If unsure, say N. +Old quota format support +CONFIG_QFMT_V1 + This quota format was (is) used by kernels earlier than 2.4.??. If + you have quota working and you don't want to convert to new quota + format say Y here. + +VFS v0 quota format support +CONFIG_QFMT_V2 + This quota format allows using quotas with 32-bit UIDs/GIDs. If you + need this functionality say Y here. Note that you will need latest + quota utilities for new quota format with this kernel. + +Compatible quota interfaces +CONFIG_QIFACE_COMPAT + This option will enable old quota interface in kernel. + If you have old quota tools (version <= 3.04) and you don't want to + upgrade them say Y here. + +Original quota interface +CONFIG_QIFACE_V1 + This is the oldest quota interface. It was used for old quota format. + If you have old quota tools and you use old quota format choose this + interface (if unsure, this interface is the best one to choose). + +VFS v0 quota interface +CONFIG_QIFACE_V2 + This quota interface was used by VFS v0 quota format. If you need + support for VFS v0 quota format (eg. you're using quota on ReiserFS) + and you don't want to upgrade quota tools, choose this interface. + Memory Technology Device (MTD) support CONFIG_MTD Memory Technology Devices are flash, RAM and similar chips, often @@ -15312,7 +15385,7 @@ debugging output from the driver. This is unlike previous versions of the driver, where enabling this option would turn on debugging output automatically. - + Example: mount -t befs /dev/hda2 /mnt -o debug @@ -15715,6 +15788,30 @@ If unsure, say N. +Allow direct I/O on files in NFS +CONFIG_NFS_DIRECTIO + There are important applications whose performance or correctness + depends on uncached access to file data. Database clusters (multiple + copies of the same instance running on separate hosts) implement their + own cache coherency protocol that subsumes the NFS cache protocols. + Applications that process datasets considerably larger than the client's + memory do not always benefit from a local cache. A streaming video + server, for instance, has no need to cache the contents of a file. + + This option enables applications to perform direct I/O on files in NFS + file systems using the O_DIRECT open() flag. When O_DIRECT is set for + files, their data is not cached in the system's page cache. Direct + read and write operations are aligned to block boundaries. Data is + moved to and from user-level application buffers directly. + + Unless your program is designed to use O_DIRECT properly, you are much + better off allowing the NFS client to manage caching for you. Misusing + O_DIRECT can cause poor server performance or network storms. This + kernel build option defaults OFF to avoid exposing system administrators + unwittingly to a potentially hazardous feature. + + If unsure, say N. + Root file system on NFS CONFIG_ROOT_NFS If you want your Linux box to mount its whole root file system (the @@ -16111,7 +16208,7 @@ Say Y here if you would like to use hard disks under Linux which were partitioned on a Macintosh. 
-Windows Logical Disk Manager (Dynamic Disk) support (EXPERIMENTAL) +Windows Logical Disk Manager (Dynamic Disk) support CONFIG_LDM_PARTITION Say Y here if you would like to use hard disks under Linux which were partitioned using Windows 2000's or XP's Logical Disk Manager. @@ -16126,8 +16223,7 @@ Normal partitions are now called Basic Disks under Windows 2000 and XP. - Technical documentation to accompany this driver is available from: - . + For a fuller description read . If unsure, say N. @@ -16200,8 +16296,9 @@ Intel EFI GUID partition support CONFIG_EFI_PARTITION Say Y here if you would like to use hard disks under Linux which - were partitioned using EFI GPT. Presently only useful on the - IA-64 platform. + were partitioned using EFI GPT. This is the default partition + scheme on IA64, and can be used on other platforms when + large block device (64-bit block address) support is desired. Ultrix partition table support CONFIG_ULTRIX_PARTITION @@ -17043,17 +17140,36 @@ HIL keyboard support CONFIG_HIL The "Human Interface Loop" is a older, 8-channel USB-like controller - used in Hewlett Packard PA-RISC based machines. There are a few - cases where it is seen on PC/MAC architectures as well, usually also - manufactured by HP. This driver is based off MACH and BSD drivers, - and implements support for a keyboard attached to the HIL port. + used in several Hewlett Packard models. This driver is based off + MACH and BSD drivers, and implements support for a keyboard attached + to the HIL port, but not for any other types of HIL input devices + like mice or tablets. However, it has been thoroughly tested and is + stable. + Full support for the USB-like functions and non-keyboard channels of - the HIL is not provided for in this driver. There are vestiges of - mouse support in the driver, but it is probably not working. The - necessary hardware documentation to fully support the HIL controller - and interface it to the linux-input API is lacking. + the HIL is currently being added to the PA-RISC port and will + be backported to work on the m68k port as well. + + Enable this option if you intend to use a HIL keyboard as your + primary keyboard and/or do not wish to test the new HIL driver. - Enable this option if you intend to use a HIL keyboard. +HP System Device Controller support +CONFIG_HP_SDC + This option enables supports for the the "System Device Controller", + an i8042 carrying microcode to manage a few miscellanous devices + on some Hewlett Packard systems. The SDC itself contains a 10ms + resolution timer/clock capable of delivering interrupts on periodic + and one-shot basis. The SDC may also be connected to a battery-backed + real-time clock, a basic audio waveform generator, and an HP-HIL + Master Link Controller serving up to seven input devices. + + By itself this option is rather useless, but enabling it will + enable selection of drivers for the abovementioned devices. + It is, however, incompatible with the old, reliable HIL keyboard + driver, and the new HIL driver is experimental, so if you plan to + use a HIL keyboard as your primary keyboard, you may wish to + keep using that driver until the new HIL drivers have had more + testing. Include IOP (IIfx/Quadra 9x0) ADB driver CONFIG_ADB_IOP @@ -17363,6 +17479,19 @@ read . The module will be called istallion.o. +PDC software console support +CONFIG_PDC_CONSOLE + Saying Y here will enable the software based PDC console to be + used as the system console. 
This is useful for machines in + which the hardware based console has not been written yet. The + following steps must be competed to use the PDC console: + + 1. create the device entry (mknod /dev/ttyB0 c 60 0) + 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 + 3. Add device ttyB0 to /etc/securetty (if you want to log on as + root on this console.) + 4. Change the kernel command console parameter to: console=ttyB0 + Microgate SyncLink adapter support CONFIG_SYNCLINK Provides support for the SyncLink ISA and PCI multiprotocol serial @@ -17512,6 +17641,10 @@ doing that; to actually get it to happen you need to pass the option "console=lp0" to the kernel at boot time. + Note that kernel messages can get lost if the printer is out of + paper (or off, or unplugged, or too busy..), but this behaviour + can be changed. See drivers/char/lp.c (do this at your own risk). + If the printer is out of paper (or off, or unplugged, or too busy..) the kernel will stall until the printer is ready again. By defining CONSOLE_LP_STRICT to 0 (at your own risk) you @@ -19017,6 +19150,15 @@ . The module will be called cpuid.o +x86 BIOS Enhanced Disk Drive support +CONFIG_EDD + Say Y or M here if you want to enable BIOS Enhanced Disk Drive + Services real mode BIOS calls to determine which disk + BIOS tries boot from. This information is then exported via /proc. + + This option is experimental, but believed to be safe, + and most disk controller BIOS vendors do not yet implement this feature. + SBC-60XX Watchdog Timer CONFIG_60XX_WDT This driver can be used with the watchdog timer found on some @@ -19083,6 +19225,34 @@ The module is called rtc.o. If you want to compile it as a module, say M here and read . +Generic Real Time Clock Support +CONFIG_GEN_RTC + If you say Y here and create a character special file /dev/rtc with + major number 10 and minor number 135 using mknod ("man mknod"), you + will get access to the real time clock (or hardware clock) built + into your computer. + + In 2.4 and later kernels this is the only way to set and get rtc + time on m68k systems so it is highly recommended. + + It reports status information via the file /proc/driver/rtc and its + behaviour is set by various ioctls on /dev/rtc. If you enable the + "extended RTC operation" below it will also provide an emulation + for RTC_UIE which is required by some programs and may improve + precision in some cases. + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module is called genrtc.o. If you want to compile it as a module, + say M here and read . To load the + module automatically add 'alias char-major-10-135 genrtc' to your + /etc/modules.conf + +Extended RTC operation +CONFIG_GEN_RTC_X + Provides an emulation for RTC_UIE which is required by some programs + and may improve precision of the generic RTC support in some cases. + Tadpole ANA H8 Support CONFIG_H8 The Hitachi H8/337 is a microcontroller used to deal with the power @@ -23065,6 +23235,23 @@ say M here and read . The module will be called radio-sf16fmi.o. +SF16FMR2 Radio +CONFIG_RADIO_SF16FMR2 + Choose Y here if you have one of these FM radio cards. If you + compile the driver into the kernel and your card is not PnP one, you + have to add "sf16fmr2=" to the kernel command line (I/O address is + 0x284 or 0x384, default 0x384). + + In order to control your radio card, you will need to use programs + that are compatible with the Video For Linux API. 
Information on + this API and pointers to "v4l" programs may be found on the WWW at + . + + If you want to compile this driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called radio-sf16fmr2.o. + Typhoon Radio (a.k.a. EcoRadio) CONFIG_RADIO_TYPHOON Choose Y here if you have one of these FM radio cards, and then fill @@ -25817,6 +26004,14 @@ of the BUG call as well as the EIP and oops trace. This aids debugging but costs about 70-100K of memory. +Morse code panics +CONFIG_PANIC_MORSE + Say Y here to receive panic messages in morse code on your keyboard LEDs, and + optionally the PC speaker, if available. + The kernel param "panicblink" controls this feature, set it to 0 to disable, + 1 for LEDs only, 2 for pc speaker, or 3 for both. If you disable this option, + then you will receive a steady blink on the LEDs instead. + Include kgdb kernel debugger CONFIG_KGDB Include in-kernel hooks for kgdb, the Linux kernel source level @@ -25862,9 +26057,11 @@ U2/Uturn I/O MMU CONFIG_IOMMU_CCIO - Say Y here to enable DMA management routines for the first - generation of PA-RISC cache-coherent machines. Programs the - U2/Uturn chip in "Virtual Mode" and use the I/O MMU. + The U2/UTurn is a bus converter with io mmu present in the Cxxx, D, + J, K, and R class machines. Compiling this driver into the kernel will + not hurt anything, removing it will reduce your kernel by about 14k. + + If unsure, say Y. LBA/Elroy PCI support CONFIG_PCI_LBA @@ -26237,6 +26434,96 @@ written) to implement the policy. If you don't understand what this is all about, it's safe to say 'N'. + For more information, take a look at linux/Documentation/cpufreq or + at + + If in doubt, say N. + +CONFIG_CPU_FREQ_24_API + This enables the /proc/sys/cpu/ sysctl interface for controlling + CPUFreq, as known from the 2.4.-kernel patches for CPUFreq. 2.5 + uses /proc/cpufreq instead. Please note that some drivers do not + work with the 2.4. /proc/sys/cpu sysctl interface, so if in doubt, + say N here. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_POWERNOW_K6 + This adds the CPUFreq driver for mobile AMD K6-2+ and mobile + AMD K6-3+ processors. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_P4_CLOCKMOD + This adds the CPUFreq driver for Intel Pentium 4 / XEON + processors. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_ELAN_CPUFREQ + This adds the CPUFreq driver for AMD Elan SC400 and SC410 + processors. + + You need to specify the processor maximum speed as a boot + parameter (or as module parameter): + elanfreq=maxspeed + with the argument "maxspeed" given in kHz. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. + +CONFIG_X86_LONGHAUL + This adds the CPUFreq driver for VIA Samuel/CyrixIII, + VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T + processors. + + If you do not want to scale the Front Side Bus or voltage, + pass the module parameter "dont_scale_fsb=1" or + "dont_scale_voltage=1". Additionally, it is advised that + you pass the current Front Side Bus speed (in MHz) to + this module as module parameter "current_fsb", e.g. + "current_fsb=133" for a Front Side Bus speed of 133 MHz. + + For details, take a look at linux/Documentation/cpufreq. + + If in doubt, say N. 
+
+CONFIG_X86_SPEEDSTEP
+ This adds the CPUFreq driver for certain mobile Intel Pentium III
+ (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
+ mobile Intel Pentium 4 P4-Ms.
+
+ If you use a Coppermine Pentium III which is capable of
+ SpeedStep, you need to pass the boot or module parameter
+ "speedstep_coppermine=1" to the kernel.
+
+ For details, take a look at linux/Documentation/cpufreq.
+
+ If in doubt, say N.
+
+CONFIG_X86_LONGRUN
+ This adds the CPUFreq driver for Transmeta Crusoe processors which
+ support LongRun.
+
+ For details, take a look at linux/Documentation/cpufreq.
+
+ If in doubt, say N.
+
+CONFIG_X86_GX_SUSPMOD
+ This adds the CPUFreq driver for NatSemi Geode processors which
+ support suspend modulation.
+
+ For details, take a look at linux/Documentation/cpufreq.
+
+ If in doubt, say N.
+
SiS
CONFIG_DRM_SIS
Choose this option if you have a SIS graphics card. AGP support is
@@ -26250,7 +26537,7 @@
Slave has its own LEDs
CONFIG_ETRAX_ETHERNET_LPSLAVE_HAS_LEDS
- Enable if the slave has it's own LEDs.
+ Enable if the slave has its own LEDs.
ATA/IDE support
CONFIG_ETRAX_IDE
@@ -26420,6 +26707,83 @@
If unsure, say N.
+NatSemi SCx200 support
+CONFIG_SCx200
+ This provides basic support for the National Semiconductor SCx200
+ processor. Right now this is just a driver for the GPIO pins.
+
+ If you don't know what to do here, say N.
+
+ This support is also available as a module. If compiled as a
+ module, it will be called scx200.o.
+
+NatSemi SCx200 Watchdog
+CONFIG_SCx200_WDT
+ Enable the built-in watchdog timer support on the National
+ Semiconductor SCx200 processors.
+
+ If compiled as a module, it will be called scx200_watchdog.o.
+
+Flash device mapped with DOCCS on NatSemi SCx200
+CONFIG_MTD_SCx200_DOCFLASH
+ Enable support for a flash chip mapped using the DOCCS signal on a
+ National Semiconductor SCx200 processor.
+
+ If you don't know what to do here, say N.
+
+ If compiled as a module, it will be called scx200_docflash.o.
+
+NatSemi SCx200 I2C using GPIO pins
+CONFIG_SCx200_I2C
+ Enable the use of two GPIO pins of a SCx200 processor as an I2C bus.
+
+ If you don't know what to do here, say N.
+
+ If compiled as a module, it will be called scx200_i2c.o.
+
+GPIO pin used for SCL
+CONFIG_SCx200_I2C_SCL
+ Enter the GPIO pin number used for the SCL signal. This value can
+ also be specified with a module parameter.
+
+GPIO pin used for SDA
+CONFIG_SCx200_I2C_SDA
+ Enter the GPIO pin number used for the SDA signal. This value can
+ also be specified with a module parameter.
+
+NatSemi SCx200 ACCESS.bus
+CONFIG_SCx200_ACB
+ Enable the use of the ACCESS.bus controllers of a SCx200 processor.
+
+ If you don't know what to do here, say N.
+
+ If compiled as a module, it will be called scx200_acb.o.
+
+IPMI top-level message handler
+CONFIG_IPMI_HANDLER
+ This enables the central IPMI message handler, required for IPMI
+ to work. Note that you must have this enabled to do any other IPMI
+ things. See IPMI.txt for more details.
+
+Generate a panic event to all BMCs on a panic
+CONFIG_IPMI_PANIC_EVENT
+ When a panic occurs, this will cause the IPMI message handler to
+ generate an IPMI event describing the panic to each interface
+ registered with the message handler.
+
+Device interface for IPMI
+CONFIG_IPMI_DEVICE_INTERFACE
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
+
+IPMI KCS handler
+CONFIG_IPMI_KCS
+ Provides a driver for a KCS-style interface to a BMC.
+ +IPMI Watchdog Timer +CONFIG_IPMI_WATCHDOG + This enables the IPMI watchdog timer. + # # A couple of things I keep forgetting: # capitalize: AppleTalk, Ethernet, DOS, DMA, FAT, FTP, Internet, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/cpufreq linux.21pre4-ac2/Documentation/cpufreq --- linux.21pre4/Documentation/cpufreq 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/Documentation/cpufreq 2003-01-06 16:33:44.000000000 +0000 @@ -0,0 +1,363 @@ + CPU frequency and voltage scaling code in the Linux(TM) kernel + + + L i n u x C P U F r e q + + + + + Dominik Brodowski + David Kimdon + + + + Clock scaling allows you to change the clock speed of the CPUs on the + fly. This is a nice method to save battery power, because the lower + the clock speed, the less power the CPU consumes. + + + +Contents: +--------- +1. Supported architectures +2. User interface +2.1 /proc/cpufreq interface [2.6] +2.2. /proc/sys/cpu/ interface [2.4] +3. CPUFreq core and interfaces +3.1 General information +3.2 CPUFreq notifiers +3.3 CPUFreq architecture drivers +4. Mailing list and Links + + + +1. Supported architectures +========================== + +ARM: + ARM Integrator, SA 1100, SA1110 +-------------------------------- + No known issues. + + +AMD Elan: + SC400, SC410 +-------------------------------- + You need to specify the highest allowed CPU frequency as + a module parameter ("max_freq") or as boot parameter + ("elanfreq="). Else the available speed range will be + limited to the speed at which the CPU runs while this + module is loaded. + + +VIA Cyrix Longhaul: + VIA Samuel/CyrixIII, VIA Cyrix Samuel/C3, + VIA Cyrix Ezra, VIA Cyrix Ezra-T +-------------------------------- + If you do not want to scale the Front Side Bus or voltage, + pass the module parameter "dont_scale_fsb=1" or + "dont_scale_voltage=1". Additionally, it is advised that + you pass the current Front Side Bus speed (in MHz) to + this module as module parameter "current_fsb", e.g. + "current_fsb=133" for a Front Side Bus speed of 133 MHz. + + +Intel SpeedStep: + certain mobile Intel Pentium III (Coppermine), and all mobile + Intel Pentium III-M (Tualatin) and mobile Intel Pentium 4 P4-Ms. +-------------------------------- + Unfortunately, only modern Intel ICH2-M and ICH3-M chipsets are + supported yet. + + +P4 CPU Clock Modulation: + Intel Pentium 4 Xeon processors +--------------------------------- + Note that you can only switch the speed of two logical CPUs at + once - but each phyiscal CPU may have different throttling levels. + + +PowerNow! K6: + mobile AMD K6-2+ / mobile K6-3+: +-------------------------------- + No known issues. + + +Transmeta Crusoe Longrun: + Transmeta Crusoe processors: +-------------------------------- + It is recommended to use the 2.6. /proc/cpufreq interface when + using this driver + + + +2. User Interface +================= + +2.1 /proc/cpufreq interface [2.6] +*********************************** + +Starting in the patches for kernel 2.5.33, CPUFreq uses a "policy" +interface /proc/cpufreq. + +When you "cat" this file, you'll find something like: + +-- + minimum CPU frequency - maximum CPU frequency - policy +CPU 0 1200000 ( 75%) - 1600000 (100%) - performance +-- + +This means the current policy allows this CPU to be run anywhere +between 1.2 GHz (the value is in kHz) and 1.6 GHz with an eye towards +performance. + +To change the policy, "echo" the desired new policy into +/proc/cpufreq. 
Use one of the following formats: + +cpu_nr:min_freq:max_freq:policy +cpu_nr%min_freq%max_freq%policy +min_freq:max_freq:policy +min_freq%max_freq%policy + +with cpu_nr being the CPU which shall be affected, min_freq and +max_freq the lower and upper limit of the CPU core frequency in kHz, +and policy either "performance" or "powersave". +A few examples: + +root@notebook:#echo -n "0:0:0:powersave" > /proc/cpufreq + sets the CPU #0 to the lowest supported frequency. + +root@notebook:#echo -n "1%100%100%performance" > /proc/cpufreq + sets the CPU #1 to the highest supported frequency. + +root@notebook:#echo -n "1000000:2000000:performance" > /proc/cpufreq + to set the frequency of all CPUs between 1 GHz and 2 GHz and to + the policy "performance". + +Please note that the values you "echo" into /proc/cpufreq are +validated first, and may be limited by hardware or thermal +considerations. Because of this, a read from /proc/cpufreq might +differ from what was written into it. + + +When you read /proc/cpufreq for the first time after a CPUFreq driver +has been initialized, you'll see the "default policy" for this +driver. If this does not suit your needs, you can pass a boot +parameter to the cpufreq core. Use the following syntax for this: + "cpufreq=min_freq:max_freq:policy", i.e. you may not chose a +specific CPU and you need to specify the limits in kHz and not in +per cent. + + +2.2 /proc/cpufreq interface [2.4] +*********************************** + +Previsiously (and still available as a config option), CPUFreq used +a "sysctl" interface which is located in + /proc/sys/cpu/0/ + /proc/sys/cpu/1/ ... (SMP only) + +In these directories, you will find three files of importance for +CPUFreq: speed-max, speed-min and speed: + +speed shows the current CPU frequency in kHz, +speed-min the minimum supported CPU frequency, and +speed-max the maximum supported CPU frequency. + + +To change the CPU frequency, "echo" the desired CPU frequency (in kHz) +to speed. For example, to set the CPU speed to the lowest/highest +allowed frequency do: + +root@notebook:# cat /proc/sys/cpu/0/speed-min > /proc/sys/cpu/0/speed +root@notebook:# cat /proc/sys/cpu/0/speed-max > /proc/sys/cpu/0/speed + + + +3. CPUFreq core and interfaces +=============================== + +3.1 General information +************************* + +The CPUFreq core code is located in linux/kernel/cpufreq.c. This +cpufreq code offers a standardized interface for the CPUFreq +architecture drivers (those pieces of code that do actual +frequency transitions), as well as to "notifiers". These are device +drivers or other part of the kernel that need to be informed of +policy changes (like thermal modules like ACPI) or of all +frequency changes (like timing code) or even need to force certain +speed limits (like LCD drivers on ARM architecture). Additionally, the +kernel "constant" loops_per_jiffy is updated on frequency changes +here. + + +3.2 CPUFreq notifiers +*********************** + +CPUFreq notifiers conform to the standard kernel notifier interface. +See linux/include/linux/notifier.h for details on notifiers. + +There are two different CPUFreq notifiers - policy notifiers and +transition notifiers. + + +3.2.1 CPUFreq policy notifiers +****************************** + +These are notified when a new policy is intended to be set. Each +CPUFreq policy notifier is called three times for a policy transition: + +1.) 
During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if + they see a need for this - may it be thermal considerations or + hardware limitations. + +2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid + hardware failure. + +3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy + - if two hardware drivers failed to agree on a new policy before this + stage, the incompatible hardware shall be shut down, and the user + informed of this. + +The phase is specified in the second argument to the notifier. + +The third argument, a void *pointer, points to a struct cpufreq_policy +consisting of five values: cpu, min, max, policy and max_cpu_freq. Min +and max are the lower and upper frequencies (in kHz) of the new +policy, policy the new policy, cpu the number of the affected CPU or +CPUFREQ_ALL_CPUS for all CPUs; and max_cpu_freq the maximum supported +CPU frequency. This value is given for informational purposes only. + + +3.2.2 CPUFreq transition notifiers +********************************** + +These are notified twice when the CPUfreq driver switches the CPU core +frequency and this change has any external implications. + +The second argument specifies the phase - CPUFREQ_PRECHANGE or +CPUFREQ_POSTCHANGE. + +The third argument is a struct cpufreq_freqs with the following +values: +cpu - number of the affected CPU or CPUFREQ_ALL_CPUS +old - old frequency +new - new frequency + + +3.3 CPUFreq architecture drivers +********************************** + +CPUFreq architecture drivers are the pieces of kernel code that +actually perform CPU frequency transitions. These need to be +initialized separately (separate initcalls), and may be +modularized. They interact with the CPUFreq core in the following way: + +cpufreq_register() +------------------ +cpufreq_register registers an arch driver to the CPUFreq core. Please +note that only one arch driver may be registered at any time. -EBUSY +is returned when an arch driver is already registered. The argument to +cpufreq_register, struct cpufreq_driver *driver, is described later. + +cpufreq_unregister() +-------------------- +cpufreq_unregister unregisters an arch driver, e.g. on module +unloading. Please note that there is no check done that this is called +from the driver which actually registered itself to the core, so +please only call this function when you are sure the arch driver got +registered correctly before. + +cpufreq_notify_transition() +--------------------------- +On "dumb" hardware where only fixed frequency can be set, the driver +must call cpufreq_notify_transition() once before, and once after the +actual transition. + +struct cpufreq_driver +--------------------- +On initialization, the arch driver is supposed to pass a pointer +to a struct cpufreq_driver *cpufreq_driver consisting of the following +entries: + +cpufreq_verify_t verify: This is a pointer to a function with the + following definition: + int verify_function (struct cpufreq_policy *policy). + This function must verify the new policy is within the limits + supported by the CPU, and at least one supported CPU is within + this range. It may be useful to use cpufreq.h / + cpufreq_verify_within_limits for this. If this is called with + CPUFREQ_ALL_CPUS, and there is no common subset of frequencies + for all CPUs, exit with an error. + +cpufreq_setpolicy_t setpolicy: This is a pointer to a function with + the following definition: + int setpolicy_function (struct cpufreq_policy *policy). 
+ This function must set the CPU to the new policy. If it is a + "dumb" CPU which only allows fixed frequencies to be set, it + shall set it to the lowest within the limit for + CPUFREQ_POLICY_POWERSAVE, and to the highest for + CPUFREQ_POLICY_PERFORMANCE. Once CONFIG_CPU_FREQ_DYNAMIC is + implemented, it can use a dynamic method to adjust the speed + between the lower and upper limit. + +struct cpufreq_policy *policy: This is an array of NR_CPUS struct + cpufreq_policies, containing the current policies set for these + CPUs. Note that policy[cpu].max_cpu_freq must contain the + absolute maximum CPU frequency supported by the specified cpu. + +In case the driver is expected to run with the 2.4.-style API +(/proc/sys/cpu/.../), two more values must be passed +#ifdef CONFIG_CPU_FREQ_24_API + unsigned int cpu_min_freq[NR_CPUS]; + unsigned int cpu_cur_freq[NR_CPUS]; +#endif + with cpu_min_freq[cpu] being the minimum CPU frequency + supported by the CPU; and the entries in cpu_cur_freq + reflecting the current speed of the appropriate CPU. + +Some Requirements to CPUFreq architecture drivers +------------------------------------------------- +* Only call cpufreq_register() when the ability to switch CPU + frequencies is _verified_ or can't be missing. Also, all + other initialization must be done beofre this call, as + cpfureq_register calls the driver's verify and setpolicy code for + each CPU. +* cpufreq_unregister() may only be called if cpufreq_register() has + been successfully(!) called before. +* kfree() the struct cpufreq_driver only after the call to + cpufreq_unregister(), unless cpufreq_register() failed. + + + +4. Mailing list and Links +************************* + + +Mailing List +------------ +There is a CPU frequency changing CVS commit and general list where +you can report bugs, problems or submit patches. To post a message, +send an email to cpufreq@www.linux.org.uk, to subscribe go to +http://www.linux.org.uk/mailman/listinfo/cpufreq. Previous post to the +mailing list are available to subscribers at +http://www.linux.org.uk/mailman/private/cpufreq/. 
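
An illustrative sketch of a transition notifier as described in section 3.2.2
above (this is not part of the patch). The callback uses the standard
notifier_call prototype from include/linux/notifier.h, and the phase values
and struct cpufreq_freqs fields are the ones listed in that section; how the
block gets registered with the CPUFreq core is not spelled out in this text,
so registration is omitted here.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>

static int my_transition_handler(struct notifier_block *nb,
                                 unsigned long phase, void *data)
{
        struct cpufreq_freqs *freqs = data;

        /* phase is CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE (section 3.2.2) */
        if (phase == CPUFREQ_POSTCHANGE)
                printk(KERN_DEBUG "cpufreq: cpu %u switched %u -> %u kHz\n",
                       freqs->cpu, freqs->old, freqs->new);
        return NOTIFY_OK;
}

static struct notifier_block my_transition_block = {
        notifier_call:  my_transition_handler,
};
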
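
A minimal skeleton of an architecture driver, following section 3.3 above
(again an illustrative sketch, not part of the patch). The struct fields and
the core calls cpufreq_verify_within_limits(), cpufreq_notify_transition()
and cpufreq_register() are the ones named in that section; the
mydrv_get_speed()/mydrv_set_speed() helpers and the MYDRV_MIN_KHZ/MYDRV_MAX_KHZ
limits are made-up placeholders, and the extra CONFIG_CPU_FREQ_24_API fields
mentioned above are left out.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/threads.h>              /* NR_CPUS */
#include <linux/cpufreq.h>

#define MYDRV_MIN_KHZ  100000           /* placeholder hardware limits */
#define MYDRV_MAX_KHZ  400000

extern unsigned int mydrv_get_speed(void);      /* placeholder hardware access */
extern void mydrv_set_speed(unsigned int khz);  /* placeholder hardware access */

static int mydrv_verify(struct cpufreq_policy *policy)
{
        /* clamp the requested limits to what the hardware supports */
        cpufreq_verify_within_limits(policy, MYDRV_MIN_KHZ, MYDRV_MAX_KHZ);
        return 0;
}

static int mydrv_setpolicy(struct cpufreq_policy *policy)
{
        struct cpufreq_freqs freqs;

        freqs.cpu = policy->cpu;
        freqs.old = mydrv_get_speed();
        freqs.new = (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                        ? policy->min : policy->max;

        /* "dumb" hardware: notify once before and once after the switch */
        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        mydrv_set_speed(freqs.new);
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        return 0;
}

static struct cpufreq_policy mydrv_policy[NR_CPUS];

static struct cpufreq_driver mydrv_driver = {
        verify:         mydrv_verify,
        setpolicy:      mydrv_setpolicy,
        policy:         mydrv_policy,
};

static int __init mydrv_init(void)
{
        /* mydrv_policy[cpu].max_cpu_freq etc. must be filled in here
           before registering (see section 3.3 above) */
        return cpufreq_register(&mydrv_driver);
}

module_init(mydrv_init);
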
+ + +Links +----- +the FTP archives: +* ftp://ftp.linux.org.uk/pub/linux/cpufreq/ + +how to access the CVS repository: +* http://cvs.arm.linux.org.uk/ + +the CPUFreq Mailing list: +* http://www.linux.org.uk/mailman/listinfo/cpufreq + +Clock and voltage scaling for the SA-1100: +* http://www.lart.tudelft.nl/projects/scaling + +CPUFreq project homepage +* http://www.brodo.de/cpufreq/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/DocBook/videobook.tmpl linux.21pre4-ac2/Documentation/DocBook/videobook.tmpl --- linux.21pre4/Documentation/DocBook/videobook.tmpl 2003-01-29 16:27:24.000000000 +0000 +++ linux.21pre4-ac2/Documentation/DocBook/videobook.tmpl 2003-01-08 15:25:42.000000000 +0000 @@ -146,16 +146,17 @@ int __init myradio_init(struct video_init *v) { - if(check_region(io, MY_IO_SIZE)) + if(!request_region(io, MY_IO_SIZE, "myradio")) { printk(KERN_ERR "myradio: port 0x%03X is in use.\n", io); return -EBUSY; } - if(video_device_register(&my_radio, VFL_TYPE_RADIO)==-1) + if(video_device_register(&my_radio, VFL_TYPE_RADIO)==-1) { + release_region(io, MY_IO_SIZE); return -EINVAL; - request_region(io, MY_IO_SIZE, "myradio"); + } return 0; } @@ -920,7 +921,7 @@ int __init mycamera_init(struct video_init *v) { - if(check_region(io, MY_IO_SIZE)) + if(!request_region(io, MY_IO_SIZE, "mycamera")) { printk(KERN_ERR "mycamera: port 0x%03X is in use.\n", io); @@ -928,9 +929,10 @@ } if(video_device_register(&my_camera, - VFL_TYPE_GRABBER)==-1) + VFL_TYPE_GRABBER)==-1) { + release_region(io, MY_IO_SIZE); return -EINVAL; - request_region(io, MY_IO_SIZE, "mycamera"); + } return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/DriverFixers linux.21pre4-ac2/Documentation/DriverFixers --- linux.21pre4/Documentation/DriverFixers 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/Documentation/DriverFixers 2003-01-06 15:44:42.000000000 +0000 @@ -0,0 +1,75 @@ +People who fix drivers as a business - ie for money. (No recommendation, +business association or other relationship implied. This for the benefit of +American lawyers is just a list of people who have asked to be listed - nothing +more). 
+ +Companies Who Will Do Small Contract Work +----------------------------------------- + +Company: BitWizard +Contact: Rogier Wolff +E-Mail: R.E.Wolff@BitWizard.nl + +Company: Caederus +Contact: Justin Mitchell +E-Mail: info@caederus.com +Location: Swansea, Wales, UK +URL: http://www.caederus.com/ + +Company: Calsoft Inc +Contact: Anupam Bhide +E-Mail: anupam@calsoftinc.com +URL: http://www.calsoftinc.com +Location: Pune, India + +Company: Hansen Partnership Inc +Contact: James Bottomley +E-Mail: James.Bottomley@HansenPartnership.com +Location: 1, Partridge Square, Oswego, Illinois 60543, USA + +Company: Linking +Contact: Elmer Joandi +E-Mail: elmer@linkingsoft.com + +Company: Penguru Consulting, LLC +Contact: Komron Takmil +E-Mail: komron@penguru.net +Location: Salt Lake City, UT USA + +Company: 7Chips +Contact: Vadim Lebedev +E-Mail: vadim@7chips.com +Location: Paris, France +Notes: Experienced in Linux and uClinux on x86/ARM/Motorola + +Company: Weinigel Ingenjörsbyrå AB +Contact: Christer Weinigel +E-Mail: christer@weinigel.se +Location: Stockholm, Sweden + +Company: WildOpenSource +Contact: Martin Hicks +E-Mail: info@wildopensource.com + + +Companies Only Interested In Larger ($10000+) Jobs +-------------------------------------------------- + + + +Companies Only Interested In Very Large ($100000+) Jobs +------------------------------------------------------- + + +To be added to the list: email giving the +following information + +Company: CompanyName [Required] +Contact: ContactName [Required] +E-Mail: An email address [Required] +URL: Web site [Optional] +Location: Area/Country [Optional] +Telephone: Contact phone number [Optional] +Speciality: Any specific speciality [Optional] +Notes: Any other notes (eg certifications, specialities) + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/i386/zero-page.txt linux.21pre4-ac2/Documentation/i386/zero-page.txt --- linux.21pre4/Documentation/i386/zero-page.txt 2003-01-29 16:27:24.000000000 +0000 +++ linux.21pre4-ac2/Documentation/i386/zero-page.txt 2003-01-08 15:33:39.000000000 +0000 @@ -31,6 +31,7 @@ 0x1e0 unsigned long ALT_MEM_K, alternative mem check, in Kb 0x1e8 char number of entries in E820MAP (below) +0x1e9 unsigned char number of entries in EDDBUF (below) 0x1f1 char size of setup.S, number of sectors 0x1f2 unsigned short MOUNT_ROOT_RDONLY (if !=0) 0x1f4 unsigned short size of compressed kernel-part in the @@ -66,6 +67,7 @@ 0x220 4 bytes (setup.S) 0x224 unsigned short setup.S heap end pointer 0x2d0 - 0x600 E820MAP +0x600 - 0x7D4 EDDBUF (setup.S) 0x800 string, 2K max COMMAND_LINE, the kernel commandline as copied using CL_OFFSET. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/IPMI.txt linux.21pre4-ac2/Documentation/IPMI.txt --- linux.21pre4/Documentation/IPMI.txt 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/Documentation/IPMI.txt 2003-01-09 00:30:24.000000000 +0000 @@ -307,7 +307,7 @@ used to control it: insmod ipmi_watchdog timeout= pretimeout= action= - preaction= + preaction= preop= The timeout is the number of seconds to the action, and the pretimeout is the amount of seconds before the reset that the pre-timeout panic will @@ -319,7 +319,18 @@ The preaction may be "pre_smi" for an indication through the SMI interface, "pre_int" for an indication through the SMI with an -interrupts, and "pre_nmi" for a NMI on a preaction. +interrupts, and "pre_nmi" for a NMI on a preaction. This is how +the driver is informed of the pretimeout. 
+ +The preop may be set to "preop_none" for no operation on a pretimeout, +"preop_panic" to set the preoperation to panic, or "preop_give_data" +to provide data to read from the watchdog device when the pretimeout +occurs. A "pre_nmi" setting CANNOT be used with "preop_give_data" +because you can't do data operations from an NMI. + +When preop is set to "preop_give_data", one byte comes ready to read +on the device when the pretimeout occurs. Select and fasync work on +the device, as well. When compiled into the kernel, the kernel command line is available for configuring the watchdog: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/networking/00-INDEX linux.21pre4-ac2/Documentation/networking/00-INDEX --- linux.21pre4/Documentation/networking/00-INDEX 2003-01-29 16:27:21.000000000 +0000 +++ linux.21pre4-ac2/Documentation/networking/00-INDEX 2003-01-06 15:38:40.000000000 +0000 @@ -97,7 +97,8 @@ sis900.txt - SiS 900/7016 Fast Ethernet device driver info. sk98lin.txt - - SysKonnect SK-NET (SK-98xx) Gigabit Ethernet driver info. + - SysKonnect SK-98xx and SK-98xx Gigabit Ethernet Adapter family + driver info. skfp.txt - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. smc9.txt diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/networking/generic-hdlc.txt linux.21pre4-ac2/Documentation/networking/generic-hdlc.txt --- linux.21pre4/Documentation/networking/generic-hdlc.txt 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/Documentation/networking/generic-hdlc.txt 2003-01-28 16:39:15.000000000 +0000 @@ -1,11 +1,13 @@ -Generic HDLC layer for Linux kernel 2.4/2.5 +Generic HDLC layer Krzysztof Halasa -May, 2001 +January, 2003 Generic HDLC layer currently supports: -- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP), -- raw HDLC (IPv4 only), +- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP). + Normal (routed) and Ethernet-bridged (Ethernet device emulation) + interfaces can share a single PVC. +- raw HDLC - either IP (IPv4) interface or Ethernet device emulation. - Cisco HDLC, - PPP (uses syncppp.c), - X.25 (uses X.25 routines). @@ -15,6 +17,10 @@ - RISCom/N2 by SDL Communications Inc. - and others, some not in the official kernel. +Ethernet device emulation (using HDLC or Frame-Relay PVC) is compatible +with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging). + + Make sure the hdlc.o and the hardware driver are loaded. It should create a number of "hdlc" (hdlc0 etc) network devices, one for each WAN port. You'll need the "sethdlc" utility, get it from: @@ -58,6 +64,9 @@ no-parity / crc16 / crc16-pr0 (CRC16 with preset zeros) / crc32-itu crc16-itu (CRC16 with ITU-T polynomial) / crc16-itu-pr0 - sets parity +* hdlc-eth - Ethernet device emulation using HDLC. Parity and encoding + as above. + * cisco - sets Cisco HDLC mode (IP, IPv6 and IPX supported) interval - time in seconds between keepalive packets timeout - time in seconds after last received keepalive packet before @@ -77,7 +86,12 @@ n392 - error threshold - both user and network n393 - monitored events count - both user and network -* create | delete n - FR only - adds / deletes PVC interface with DLCI #n. +Frame-Relay only: +* create n | delete n - adds / deletes PVC interface with DLCI #n. + Newly created interface will be named pvc0, pvc1 etc. + +* create ether n | delete ether n - adds a device for Ethernet-bridged + frames. The device will be named pvceth0, pvceth1 etc. 
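
Looking back at the Documentation/IPMI.txt hunk above: a small user-space
sketch (not part of the patch) of consuming the single byte that becomes
readable when "preop_give_data" is set. The /dev/watchdog node name is an
assumption (the conventional watchdog device node); the select()-then-read()
behaviour is what the text describes. Note that opening the watchdog device
normally also arms the timer, so a real daemon would keep feeding it as well.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/select.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_RDONLY);       /* assumed node name */
        fd_set rfds;
        char byte;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);
        /* blocks until the pretimeout fires and the driver queues one byte */
        if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0 &&
            read(fd, &byte, 1) == 1)
                printf("IPMI watchdog pretimeout occurred\n");
        close(fd);
        return 0;
}
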
@@ -104,11 +118,11 @@ If you have a problem with N2 or C101 card, you can issue the "private" -command to see port's packet descriptor rings: +command to see port's packet descriptor rings (in kernel logs): sethdlc hdlc0 private -The hardware driver have to be build with CONFIG_HDLC_DEBUG_RINGS. +The hardware driver has to be build with CONFIG_HDLC_DEBUG_RINGS. Attaching this info to bug reports would be helpful. Anyway, let me know if you have problems using this. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/networking/sk98lin.txt linux.21pre4-ac2/Documentation/networking/sk98lin.txt --- linux.21pre4/Documentation/networking/sk98lin.txt 2003-01-29 16:27:21.000000000 +0000 +++ linux.21pre4-ac2/Documentation/networking/sk98lin.txt 2003-01-06 15:38:40.000000000 +0000 @@ -1,120 +1,158 @@ -(C)Copyright 1999-2001 SysKonnect GmbH. +(C)Copyright 1999-2002 SysKonnect GmbH. All rights reserved =========================================================================== -sk98lin.txt created 28-May-2001 +sk98lin.txt created 19-Dec-2002 -Readme File for sk98lin v4.06 -SK-NET Gigabit Ethernet PCI driver for LINUX +Readme File for sk98lin v6.02 +SysKonnect SK-98xx Gigabit Ethernet Adapter family driver for LINUX This file contains -(1) OVERVIEW -(2) REQUIRED FILES -(3) INSTALLATION -(4) INCLUSION OF ADAPTER AT SYSTEM START -(5) DRIVER PARAMETERS -(6) LARGE FRAME SUPPORT -(7) TROUBLESHOOTING -(8) HISTORY + 1 Overview + 2 Required Files + 3 Installation + 3.1 Driver Installation + 3.2 Inclusion of adapter at system start + 4 Driver Parameters + 4.1 Per-Port Parameters + 4.2 Adapter Parameters + 5 Large Frame Support + 6 VLAN and Link Aggregation Support (IEEE 802.1, 802.1q, 802.3ad) + 7 Troubleshooting + 8 History =========================================================================== -(1) OVERVIEW -============ +1 Overview +=========== -The sk98lin driver supports the SysKonnect SK-NET Gigabit Ethernet -Adapter SK-98xx family on Linux 2.2.x and above. -It has been tested with Linux on Intel/x86 machines. -From v3.02 on, the driver is integrated in the linux kernel source. +The sk98lin driver supports the SysKonnect SK-98xx and SK-95xx family +on Linux. It has been tested with Linux on Intel/x86 machines. *** -(2) REQUIRED FILES -================== +2 Required Files +================= The linux kernel source. No additional files required. *** -(3) INSTALLATION -================ +3 Installation +=============== + +It is recommended to download the latest version of the driver from the +SysKonnect web site www.syskonnect.com. If you have downloaded the latest +driver, the Linux kernel has to be patched before the driver can be +installed. For details on how to patch a Linux kernel, refer to the +patch.txt file. + +3.1 Driver Installation +------------------------ The following steps describe the actions that are required to install the driver and to start it manually. These steps should be carried out for the initial driver setup. Once confirmed to be ok, they can -be included in the system start which is described in the next -chapter. +be included in the system start. -NOTE 1: You must have 'root' access to the system to perform - the following tasks. -NOTE 2: IMPORTANT: In case of problems, please read the section - "Troubleshooting" below. - -1) The driver can either be integrated into the kernel or it can - be compiled as a module. - Select the appropriate option during the kernel configuration. 
- For use as a module, your kernel must have - 'loadable module support' enabled. - For automatic driver start, you also need 'Kernel module loader' - enabled. - Configure those options, build and install the new kernel. If you - choose to use the driver as a module, do "make modules" and - "make modules_install". - Reboot your system. - -2) Load the module manually by entering: - modprobe sk98lin - If the SysKonnect SK-98xx adapter is installed in your - computer and you have a /proc filesystem, running the command - 'more /proc/net/dev' should produce an output containing a - line with the following format: - eth0: 0 0 ... - which means that your adapter has been found and initialized. +NOTE 1: To perform the following tasks you need 'root' access. + +NOTE 2: In case of problems, please read the section "Troubleshooting" + below. + +The driver can either be integrated into the kernel or it can be compiled +as a module. Select the appropriate option during the kernel +configuration. + +Compile/use the driver as a module +---------------------------------- +To compile the driver, go to the directory /usr/src/linux and +execute the command "make menuconfig" or "make xconfig" and proceed as +follows: + +To integrate the driver permanently into the kernel, proceed as follows: + +1. Select the menu "Network device support" and then "Ethernet(1000Mbit)" +2. Mark "SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter + family support" with (*) +3. Build a new kernel when the configuration of the above options is + finished. +4. Install the new kernel. +5. Reboot your system. + +To use the driver as a module, proceed as follows: + +1. Enable 'loadable module support' in the kernel. +2. For automatic driver start, enable the 'Kernel module loader'. +3. Select the menu "Network device support" and then "Ethernet(1000Mbit)" +4. Mark "SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter + family support" with (M) +5. Execute the command "make modules". +6. Execute the command "make modules_install". + The appropiate modules will be installed. +7. Reboot your system. + + +Load the module manually +------------------------ +To load the module manually, proceed as follows: + +1. Enter "modprobe sk98lin". +2. If the SysKonnect SK-98xx adapter is installed in your computer and you + have a /proc file system, execute the command: + "ls /proc/net/sk98lin/" + This should produce an output containing a line with the following + format: + eth0 eth1 ... + which indicates that your adapter has been found and initialized. - NOTE 1: If you have more than one SysKonnect SK-98xx adapter, the - adapters will be listed as 'eth0', 'eth1', 'eth2', etc. - For each adapter, repeat the steps 3) and 4). - NOTE 2: If you have other Ethernet adapters installed, - your SysKonnect SK-98xx adapter can be mapped to 'eth1' or - 'eth2' ... - The module installation message (in system logfile or - on console, depending on /etc/syslog.conf) prints a line - for each adapter that is found, containing the - corresponding 'ethX'. + NOTE 1: If you have more than one SysKonnect SK-98xx adapter installed, + the adapters will be listed as 'eth0', 'eth1', 'eth2', etc. + For each adapter, repeat steps 3 and 4 below. + + NOTE 2: If you have other Ethernet adapters installed, your SysKonnect + SK-98xx adapter will be mapped to the next available number, + e.g. 'eth1'. The mapping is executed automatically. 
+ The module installation message (displayed either in a system + log file or on the console) prints a line for each adapter + found containing the corresponding 'ethX'. -3) Select an IP address and assign it to the respective adapter by +3. Select an IP address and assign it to the respective adapter by entering: - ifconfig eth0 - This causes the adapter to connect to the ethernet. The solitary - yellow LED at the adapter is now active, the link status LED of - the primary port is on and the link status LED of the secondary - port (on dual port adapters) is blinking (only if the laters are - connected to a switch or hub). - You will also get a status message on the console saying - "ethX: network connection up using port Y" and indicating - the selected connection parameters. + ifconfig eth0 + With this command, the adapter is connected to the Ethernet. + SK-98xx Gigabit Ethernet Server Adapters: The yellow LED on the adapter + is now active, the link status LED of the primary port is active and + the link status LED of the secondary port (on dual port adapters) is + blinking (if the ports are connected to a switch or hub). + SK-98xx V2.0 Gigabit Ethernet Adapters: The link status LED is active. + In addition, you will receive a status message on the console stating + "ethX: network connection up using port Y" and showing the selected + connection parameters (x stands for the ethernet device number + (0,1,2, etc), y stands for the port name (A or B)). + NOTE: If you are in doubt about IP addresses, ask your network administrator for assistance. + +4. Your adapter should now be fully operational. + Use 'ping ' to verify the connection to other computers + on your network. +5. To check the adapter configuration view /proc/net/sk98lin/[devicename]. + For example by executing: + "cat /proc/net/sk98lin/eth0" + +Unload the module +----------------- +To stop and unload the driver modules, proceed as follows: -4) Your adapter should now be fully operational. - Use 'ping ' to verify the connection to other - computers on your network. - By viewing /proc/net/sk98lin/[devicename], you can check some - information regarding to the adapter configuration. - - -5) The driver module can be stopped and unloaded using the following - commands: - ifconfig eth0 down - rmmod sk98lin -*** - +1. Execute the command "ifconfig eth0 down". +2. Execute the command "rmmod sk98lin". -(4) INCLUSION OF ADAPTER AT SYSTEM START -======================================== +3.2 Inclusion of adapter at system start +----------------------------------------- Since a large number of different Linux distributions are available, we are unable to describe a general installation procedure @@ -122,41 +160,45 @@ Because the driver is now integrated in the kernel, installation should be easy, using the standard mechanism of your distribution. Refer to the distribution's manual for installation of ethernet adapters. + *** +4 Driver Parameters +==================== -(5) DRIVER PARAMETERS -===================== +Parameters can be set at the command line after the module has been +loaded with the command 'modprobe'. +In some distributions, the configuration tools are able to pass parameters +to the driver module. -Parameters can be set at the command line while loading the -module with 'modprobe'. The configuration tools of some distributions -can also give parameters to the driver module. If you use the kernel module loader, you can set driver parameters in the file /etc/modules.conf (or old name: /etc/conf.modules). 
-Insert a line of the form: - -options sk98lin ... +To set the driver parameters in this file, proceed as follows: -For "...", use the same syntax as described below for the command -line paramaters of modprobe. -You either have to reboot your computer or unload and reload -the driver to activate the new parameters. -The syntax of the driver parameters is: - -modprobe sk98lin parameter=value1[,value2[,value3...]] - -value1 is for the first adapter, value2 for the second one etc. -All Parameters are case sensitive, so write them exactly as -shown below. - -Sample: Suppose you have two adapters. You want to set AutoNegotiation - on Port A of the first adapter to ON and on Port A of the - second adapter to OFF. - You also want to set DuplexCapabilities on Port A of the first - adapter to FULL and on Port A of the second adapter to HALF. - You must enter: +1. Insert a line of the form : + options sk98lin ... + For "...", the same syntax is required as described for the command + line paramaters of modprobe below. +2. To activate the new parameters, either reboot your computer + or + unload and reload the driver. + The syntax of the driver parameters is: + + modprobe sk98lin parameter=value1[,value2[,value3...]] + + where value1 refers to the first adapter, value2 to the second etc. + +NOTE: All parameters are case sensitive. Write them exactly as shown + below. + +Example: +Suppose you have two adapters. You want to set auto-negotiation +on the first adapter to ON and on the second adapter to OFF. +You also want to set DuplexCapabilities on the first adapter +to FULL, and on the second adapter to HALF. +Then, you must enter: - modprobe sk98lin AutoNeg_A=On,Off DupCap_A=Full,Half + modprobe sk98lin AutoNeg=On,Off DupCap=Full,Half NOTE: The number of adapters that can be configured this way is limited in the driver (file skge.c, constant SK_MAX_CARD_PARAM). @@ -164,221 +206,324 @@ more adapters, adjust this and recompile. -5.1 Per-Port Parameters ------------------------ -Those setting are available for each port on the adapter. +4.1 Per-Port Parameters +------------------------ + +These settings are available for each port on the adapter. In the following description, '?' stands for the port for which you set the parameter (A or B). -- Auto Negotiation - Parameter: AutoNeg_? - Values: On, Off, Sense - Default: Sense +Speed +----- +Parameter: Speed_? +Values: 10, 100, 1000, Auto +Default: Auto + +This parameter is used to set the speed capabilities. It is only valid +for the SK-98xx V2.0 copper adapters. +Usually, the speed is negotiated between the two ports during link +establishment. If this fails, a port can be forced to a specific setting +with this parameter. + +Auto-Negotiation +---------------- +Parameter: AutoNeg_? +Values: On, Off, Sense +Default: On - The "Sense"-mode finds out automatically whether the link - partner supports autonegotiation or not. +The "Sense"-mode automatically detects whether the link partner supports +auto-negotiation or not. -- Duplex Capabilities - Parameter: DupCap_? - Values: Half, Full, Both - Default: Both - - This parameters is relevant only if autonegotiation for - this port is not "Sense". If autonegotiation is "On", all - three values are possible. If it is "Off", only "Full" and - "Half" are allowed. - It is usefull if your link partner does not support all - possible combinations. - -- Flow Control - Parameter: FlowCtrl_? 
- Values: Sym, SymOrRem, LocSend, None - Default: SymOrRem - - This parameter can be used to set the flow control capabilities - that the port reports during autonegotiation. - The meaning of the different modes is: --- Sym = Symetric: both link partners are allowed to send PAUSE frames --- SymOrRem = SymetricOrRemote: both or only remote partner are allowed - to send PAUSE frames --- LocSend = LocalSend: only local link partner is allowed to send - PAUSE frames --- None: no link partner is allowed to send PAUSE frames +Duplex Capabilities +------------------- +Parameter: DupCap_? +Values: Half, Full, Both +Default: Both + +This parameters is only relevant if auto-negotiation for this port is +not set to "Sense". If auto-negotiation is set to "On", all three values +are possible. If it is set to "Off", only "Full" and "Half" are allowed. +This parameter is usefull if your link partner does not support all +possible combinations. + +Flow Control +------------ +Parameter: FlowCtrl_? +Values: Sym, SymOrRem, LocSend, None +Default: SymOrRem + +This parameter can be used to set the flow control capabilities the +port reports during auto-negotiation. It can be set for each port +individually. +Possible modes: + -- Sym = Symetric: both link partners are allowed to send + PAUSE frames + -- SymOrRem = SymetricOrRemote: both or only remote partner + are allowed to send PAUSE frames + -- LocSend = LocalSend: only local link partner is allowed + to send PAUSE frames + -- None = no link partner is allowed to send PAUSE frames - NOTE: This parameter is ignored if autonegotiation is set to "Off". +NOTE: This parameter is ignored if auto-negotiation is set to "Off". -- Role in Master-Slave-Negotiation (1000Base-T only). - Parameter: Role_? - Values: Auto, Master, Slave - Default: Auto - - This parameter is only valid for the SK-9821 and SK-9822 adapters. - For two 1000Base-T ports to communicate, one must take the role as - master (providing timing information), while the other must be slave. - Normally, this is negotiated between the two ports during link - establishment. If this should ever fail, you can force a port to a - specific setting with this parameter. - +Role in Master-Slave-Negotiation (1000Base-T only) +-------------------------------------------------- +Parameter: Role_? +Values: Auto, Master, Slave +Default: Auto + +This parameter is only valid for the SK-9821 and SK-9822 adapters. +For two 1000Base-T ports to communicate, one must take the role of the +master (providing timing information), while the other must be the +slave. Usually, this is negotiated between the two ports during link +establishment. If this fails, a port can be forced to a specific setting +with this parameter. -5.2 Per-Adapter Parameters --------------------------- -- Preferred Port - Parameter: PrefPort - Values: A, B - Default: A - - This is used to force the preferred port to A or B (on two-port NICs). - The preferred port is the one that is used if both are detected as - fully functional. - -- RLMT (Redundant Link Management Technology) Mode - Parameter: RlmtMode - Values: CheckLinkState,CheckLocalPort, CheckSeg, DualNet - Default: CheckLinkState - - RLMT (the driver part that decides which port to use) knows three - ways of checking if a port is available for use: - --- CheckLinkState = Check link state only: RLMT uses the link state - reported by the adapter hardware for each individual port to determine - whether a port can be used for all network traffic or not. 
- --- CheckLocalPort - Check other port on adapter: RLMT sends test frames - from each port to each other port and checks if they are received by - the other port, respectively. Thus, the ports must be connected to the - network such that LLC test frames can be exchanged between them - (i.e. there must be no routers between the ports). - --- CheckSeg - Check other port and segmentation: RLMT checks the other port - and in addition requests information from the Gigabit Ethernet - switch next to each port to see if the network is segmented between - the ports. Thus, this mode is only to be used if you have Gigabit - Ethernet switches installed in your network that have been configured - to use the Spanning Tree protocol. - --- DualNet - Both ports A and B are used as separate devices at the same - time. So if you have a dual port adapter, port A will show up as eth0 - and port B as eth1. Both ports can be used independend with distinct - IP addresses. - The preferred port setting is not used. Rlmt is turned off. - +4.2 Adapter Parameters +----------------------- - NOTE: The modes CheckLocalPort and CheckSeg are meant to operate in - configurations where a network path between the ports on one - adapter exists. Especially, they are not designed to work where - adapters are connected back-to-back. +Preferred Port +-------------- +Parameter: PrefPort +Values: A, B +Default: A + +This is used to force the preferred port to A or B (on dual-port network +adapters). The preferred port is the one that is used if both are detected +as fully functional. + +RLMT Mode (Redundant Link Management Technology) +------------------------------------------------ +Parameter: RlmtMode +Values: CheckLinkState,CheckLocalPort, CheckSeg, DualNet +Default: CheckLinkState + +RLMT monitors the status of the port. If the link of the active port +fails, RLMT switches immediately to the standby link. The virtual link is +maintained as long as at least one 'physical' link is up. + +Possible modes: + + -- CheckLinkState - Check link state only: RLMT uses the link state + reported by the adapter hardware for each individual port to + determine whether a port can be used for all network traffic or + not. + + -- CheckLocalPort - In this mode, RLMT monitors the network path + between the two ports of an adapter by regularly exchanging packets + between them. This mode requires a network configuration in which + the two ports are able to "see" each other (i.e. there must not be + any router between the ports). + + -- CheckSeg - Check local port and segmentation: This mode supports the + same functions as the CheckLocalPort mode and additionally checks + network segmentation between the ports. Therefore, this mode is only + to be used if Gigabit Ethernet switches are installed on the network + that have been configured to use the Spanning Tree protocol. + + -- DualNet - In this mode, ports A and B are used as separate devices. + If you have a dual port adapter, port A will be configured as eth0 + and port B as eth1. Both ports can be used independently with + distinct IP addresses. The preferred port setting is not used. + RLMT is turned off. + +NOTE: RLMT modes CLP and CLPSS are designed to operate in configurations + where a network path between the ports on one adapter exists. + Moreover, they are not designed to work where adapters are connected + back-to-back. 
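
For readers wondering how per-adapter parameters such as AutoNeg_A reach the
driver at all, here is a generic 2.4-style sketch (not part of the patch, and
not claimed to match skge.c exactly): an array of strings exported with
MODULE_PARM, one entry per adapter, which is what makes the
"parameter=value1[,value2...]" form shown above work. The value 16 for
SK_MAX_CARD_PARAM is a guess; the real constant lives in skge.c as the text
notes.

#include <linux/module.h>

#define SK_MAX_CARD_PARAM 16    /* guessed; the real value is in skge.c */

static char *AutoNeg_A[SK_MAX_CARD_PARAM] = { NULL, };
MODULE_PARM(AutoNeg_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
MODULE_PARM_DESC(AutoNeg_A,
        "Port A auto-negotiation, one entry per adapter: On, Off or Sense");
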
*** -(6) LARGE FRAME SUPPORT -======================= +5 Large Frame Support +====================== -Large frames (also called jumbo frames) are now supported by the -driver. This can result in a greatly improved throughput if -transfering large amounts of data. -To enable large frames, set the MTU (maximum transfer unit) -of the interface to the value you wish (up to 9000). The command -for this is: - ifconfig eth0 mtu 9000 +The driver supports large frames (also called jumbo frames). Using large +frames can result in an improved throughput if transferring large amounts +of data. +To enable large frames, set the MTU (maximum transfer unit) of the +interface to the desired value (up to 9000), execute the following +command: + ifconfig eth0 mtu 9000 This will only work if you have two adapters connected back-to-back -or if you use a switch that supports large frames. When using a -switch, it should be configured to allow large frames, without -autonegotiating for them. -The setting must be done on all adapters that can be reached by -the large frames. If one adapter is not set to receive large frames, -it will simply drop them. +or if you use a switch that supports large frames. When using a switch, +it should be configured to allow large frames and auto-negotiation should +be set to OFF. The setting must be configured on all adapters that can be +reached by the large frames. If one adapter is not set to receive large +frames, it will simply drop them. + +You can switch back to the standard ethernet frame size by executing the +following command: + ifconfig eth0 mtu 1500 -You can switch back to the standard ethernet frame size with: - ifconfig eth0 mtu 1500 - -To make this setting persitent, add a script with the 'ifconfig' -line to the system startup sequence (named something like "S99sk98lin" +To permanently configure this setting, add a script with the 'ifconfig' +line to the system startup sequence (named something like "S99sk98lin" in /etc/rc.d/rc2.d). *** -(7) TROUBLESHOOTING -=================== +6 VLAN and Link Aggregation Support (IEEE 802.1, 802.1q, 802.3ad) +================================================================== + +The SysKonnect Linux drivers are able to support VLAN and Link Aggregation +according to IEEE standards 802.1, 802.1q, and 802.3ad. These features are +only available after installation of open source modules available on the +Internet: +For VLAN go to: http://scry.wanfear.com/~greear/vlan.html +For Link Aggregation go to: http://www.st.rim.or.jp/~yumo + +NOTE: SysKonnect GmbH does not offer any support for these open source + modules and does not take the responsibility for any kind of + failures or problems arising in connection with these modules. + +NOTE: Configuring Link Aggregation on a SysKonnect dual link adapter may + cause problems when unloading the driver. + + +7 Troubleshooting +================== + +If any problems occur during the installation process, check the +following list: -If you run into problems during installation, check those items: Problem: The SK-98xx adapter can not be found by the driver. -Reason: Look in /proc/pci for the following entry: +Solution: In /proc/pci search for the following entry: 'Ethernet controller: SysKonnect SK-98xx ...' - If this entry exists, then the SK-98xx adapter has been - found by the system and should be able to be used. - If this entry does not exist or if the file '/proc/pci' - is not there, then you may have a hardware problem or PCI - support may not be enabled in your kernel. 
- The adapter can be checked using the diagnostic program - which is available from the SysKonnect web site: - www.syskonnect.de - Some COMPAQ machines have a problem with PCI under - Linux. This is described in the 'PCI howto' document - (included in some distributions or available from the - www, e.g. at 'www.linux.org'). This might be fixed in the - 2.2.x kernel series (I've not tested it). - -Problem: Programs such as 'ifconfig' or 'route' can not be found or - you get an error message 'Operation not permitted'. -Reason: You are not logged in as user 'root'. Logout and - login as root or change to root via 'su'. - -Problem: Using the command 'ping
', you get a message - "ping: sendto: Network is unreachable". -Reason: Your route is not set up correct. - If you are using RedHat, you probably forgot - to set up the route in 'network configuration'. - Check the existing routes with the 'route' command - and check if there is an entry for 'eth0' and if - it is correct. - -Problem: The driver can be started, the adapter is connected - to the network, but you can not receive or transmit - any packet; e.g. 'ping' does not work. -Reason: You have an incorrect route in your routing table. - Check the routing table with the command 'route' and - read the manual pages about route ('man route'). -NOTE: Although the 2.2.x kernel versions generate the routing - entry automatically, you may have problems of this kind - here, too. We found a case where the driver started correct - at system boot, but after removing and reloading the driver, - the route of the adapter's network pointed to the 'dummy0' - device and had to be corrected manually. - -Problem: You want to use your computer as a router between - multiple IP subnetworks (using multiple adapters), but - you can not reach computers in other subnetworks. -Reason: Either the router's kernel is not configured for IP - forwarding or there is a problem with the routing table - and gateway configuration in at least one of the - computers. - -Problem: At the start of the driver, you get an error message: - "eth0: -- ERROR -- - Class: internal Software error - Nr: 0xcc - Msg: SkGeInitPort() cannot init running ports" -Reason: You are using a driver compiled for single processor - machines on an multiprocessor machine with SMP (Symetric - MultiProcessor) kernel. - Configure your kernel appropriate and recompile the kernel or - the modules. + If this entry exists, the SK-98xx or SK-98xx V2.0 adapter has + been found by the system and should be operational. + If this entry does not exist or if the file '/proc/pci' is not + found, there may be a hardware problem or the PCI support may + not be enabled in your kernel. + The adapter can be checked using the diagnostics program which + is available on the SysKonnect web site: + www.syskonnect.com + + Some COMPAQ machines have problems dealing with PCI under Linux. + Linux. This problem is described in the 'PCI howto' document + (included in some distributions or available from the + web, e.g. at 'www.linux.org'). + + +Problem: Programs such as 'ifconfig' or 'route' can not be found or the + error message 'Operation not permitted' is displayed. +Reason: You are not logged in as user 'root'. +Solution: Logout and login as 'root' or change to 'root' via 'su'. + + +Problem: Upon use of the command 'ping
' the message + "ping: sendto: Network is unreachable" is displayed. +Reason: Your route is not set correctly. +Solution: If you are using RedHat, you probably forgot to set up the + route in the 'network configuration'. + Check the existing routes with the 'route' command and check + if an entry for 'eth0' exists, and if so, if it is set correctly. + + +Problem: The driver can be started, the adapter is connected to the + network, but you cannot receive or transmit any packets; + e.g. 'ping' does not work. +Reason: There is an incorrect route in your routing table. +Solution: Check the routing table with the command 'route' and read the + manual help pages dealing with routes (enter 'man route'). + +NOTE: Although the 2.2.x kernel versions generate the routing entry + automatically, problems of this kind may occur here as well. We've + come across a situation in which the driver started correctly at + system start, but after the driver has been removed and reloaded, + the route of the adapter's network pointed to the 'dummy0'device + and had to be corrected manually. + + +Problem: Your computer should act as a router between multiple + IP subnetworks (using multiple adapters), but computers in + other subnetworks cannot be reached. +Reason: Either the router's kernel is not configured for IP forwarding + or the routing table and gateway configuration of at least one + computer is not working. + +Problem: Upon driver start, the following error message is displayed: + "eth0: -- ERROR -- + Class: internal Software error + Nr: 0xcc + Msg: SkGeInitPort() cannot init running ports" +Reason: You are using a driver compiled for single processor machines + on a multiprocessor machine with SMP (Symetric MultiProcessor) + kernel. +Solution: Configure your kernel appropriately and recompile the kernel or + the modules. + + If your problem is not listed here, please contact SysKonnect's technical support for help (linux@syskonnect.de). -When contacting our technical support, please ensure that the -following information is available: +When contacting our technical support, please ensure that the following +information is available: - System Manufacturer and Model - Boards in your system - Distribution - Kernel version +- Driver version *** +8 History +========== -(8) HISTORY -=========== +VERSION 6.00 (In-Kernel version) +New Features: +- Support for SK-98xx V2.0 adapters +- Support for gmac +- Support for kernel 2.4.x and kernel 2.2.x +- Zerocopy support for kernel 2.4.x with sendfile() +- Support for scatter-gather functionality with sendfile() +- Speed support for SK-98xx V2.0 adapters +- New ProcFs entries +- New module parameters +Problems fixed: +- ProcFS initialization +- csum packet error +- Ierror/crc counter error (#10767) +- rx_too_long counter error (#10751) +Known limitations: +- None + +VERSION 4.11 +New Features: +- none +Problems fixed: +- Error statistic counter fix (#10620) +- RLMT-Fixes (#10659, #10639, #10650) +- LM80 sensor initialization fix (#10623) +- SK-CSUM memory fixes (#10610). 
+Known limitations: +- None + +VERSION 4.10 +New Features: +- New ProcFs entries +Problems fixed: +- Corrected some printk's +Known limitations: +- None + +VERSION 4.09 +New Features: +- IFF_RUNNING support (link status) +- New ProcFs entries +Problems fixed: +- too long counters +- too short counters +- Kernel error compilation +Known limitations: +- None + +VERSION 4.06 (In-Kernel version) +Problems fixed: +- MTU init problems + +VERSION 4.04 +Problems fixed: +- removed VLAN error messages VERSION 4.02 (In-Kernel version) New Features: @@ -518,3 +663,4 @@ ***End of Readme File*** + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/sched-coding.txt linux.21pre4-ac2/Documentation/sched-coding.txt --- linux.21pre4/Documentation/sched-coding.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/Documentation/sched-coding.txt 2003-01-06 19:13:57.000000000 +0000 @@ -0,0 +1,129 @@ + Reference for various scheduler-related methods in the O(1) scheduler + Robert Love , MontaVista Software + + +Note most of these methods are local to kernel/sched.c - this is by design. +The scheduler is meant to be self-contained and abstracted away. This document +is primarily for understanding the scheduler, not interfacing to it. Some of +the discussed interfaces, however, are general process/scheduling methods. +They are typically defined in include/linux/sched.h. + + +Main Scheduling Methods +----------------------- + +void load_balance(runqueue_t *this_rq, int idle) + Attempts to pull tasks from one cpu to another to balance cpu usage, + if needed. This method is called explicitly if the runqueues are + inbalanced or periodically by the timer tick. Prior to calling, + the current runqueue must be locked and interrupts disabled. + +void schedule() + The main scheduling function. Upon return, the highest priority + process will be active. + + +Locking +------- + +Each runqueue has its own lock, rq->lock. When multiple runqueues need +to be locked, lock acquires must be ordered by ascending &runqueue value. + +A specific runqueue is locked via + + task_rq_lock(task_t pid, unsigned long *flags) + +which disables preemption, disables interrupts, and locks the runqueue pid is +running on. Likewise, + + task_rq_unlock(task_t pid, unsigned long *flags) + +unlocks the runqueue pid is running on, restores interrupts to their previous +state, and reenables preemption. + +The routines + + double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) + +and + + double_rq_unlock(runqueue_t *rq1, runqueue_t rq2) + +safely lock and unlock, respectively, the two specified runqueues. They do +not, however, disable and restore interrupts. Users are required to do so +manually before and after calls. + + +Values +------ + +MAX_PRIO + The maximum priority of the system, stored in the task as task->prio. + Lower priorities are higher. Normal (non-RT) priorities range from + MAX_RT_PRIO to (MAX_PRIO - 1). +MAX_RT_PRIO + The maximum real-time priority of the system. Valid RT priorities + range from 0 to (MAX_RT_PRIO - 1). +MAX_USER_RT_PRIO + The maximum real-time priority that is exported to user-space. Should + always be equal to or less than MAX_RT_PRIO. Setting it less allows + kernel threads to have higher priorities than any user-space task. +MIN_TIMESLICE +MAX_TIMESLICE + Respectively, the minimum and maximum timeslices (quanta) of a process. + +Data +---- + +struct runqueue + The main per-CPU runqueue data structure. +struct task_struct + The main per-process data structure. 
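
A short sketch of the lock/unlock pairing described in the Locking section
above (not part of the patch). These helpers are local to kernel/sched.c, so
the exact prototypes shown here, in particular task_rq_lock() returning the
locked runqueue, are an assumption about that file rather than something this
document states.

/* meant to live in kernel/sched.c, where these helpers are visible */
static void frob_task(task_t *p)
{
        unsigned long flags;
        runqueue_t *rq;

        rq = task_rq_lock(p, &flags);   /* irqs off, preemption off, rq locked */
        /* ... examine or adjust p's scheduling state under the lock ... */
        task_rq_unlock(rq, &flags);     /* unlock, restore irqs and preemption */
}
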
+ + +General Methods +--------------- + +cpu_rq(cpu) + Returns the runqueue of the specified cpu. +this_rq() + Returns the runqueue of the current cpu. +task_rq(task) + Returns the runqueue which holds the specified task. +cpu_curr(cpu) + Returns the task currently running on the given cpu. +rt_task(task) + Returns true if task is real-time, false if not. +task_cpu(task) + + +Process Control Methods +----------------------- + +void set_user_nice(task_t *p, long nice) + Sets the "nice" value of task p to the given value. +int setscheduler(pid_t pid, int policy, struct sched_param *param) + Sets the scheduling policy and parameters for the given pid. +void set_cpus_allowed(task_t *p, unsigned long new_mask) + Sets a given task's CPU affinity and migrates it to a proper cpu. + Callers must have a valid reference to the task and assure the + task not exit prematurely. No locks can be held during the call. +set_task_state(tsk, state_value) + Sets the given task's state to the given value. +set_current_state(state_value) + Sets the current task's state to the given value. +void set_tsk_need_resched(struct task_struct *tsk) + Sets need_resched in the given task. +void clear_tsk_need_resched(struct task_struct *tsk) + Clears need_resched in the given task. +void set_need_resched() + Sets need_resched in the current task. +void set_task_cpu(task, cpu) + Sets task->cpu to cpu on SMP. Noop on UP. +void clear_need_resched() + Clears need_resched in the current task. +int need_resched() + Returns true if need_resched is set in the current task, false + otherwise. +yield() + Place the current process at the end of the runqueue and call schedule. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/sched-design.txt linux.21pre4-ac2/Documentation/sched-design.txt --- linux.21pre4/Documentation/sched-design.txt 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/Documentation/sched-design.txt 2003-01-06 19:13:57.000000000 +0000 @@ -0,0 +1,165 @@ + Goals, Design and Implementation of the + new ultra-scalable O(1) scheduler + + + This is an edited version of an email Ingo Molnar sent to + lkml on 4 Jan 2002. It describes the goals, design, and + implementation of Ingo's new ultra-scalable O(1) scheduler. + Last Updated: 18 April 2002. + + +Goal +==== + +The main goal of the new scheduler is to keep all the good things we know +and love about the current Linux scheduler: + + - good interactive performance even during high load: if the user + types or clicks then the system must react instantly and must execute + the user tasks smoothly, even during considerable background load. + + - good scheduling/wakeup performance with 1-2 runnable processes. + + - fairness: no process should stay without any timeslice for any + unreasonable amount of time. No process should get an unjustly high + amount of CPU time. + + - priorities: less important tasks can be started with lower priority, + more important tasks with higher priority. + + - SMP efficiency: no CPU should stay idle if there is work to do. + + - SMP affinity: processes which run on one CPU should stay affine to + that CPU. Processes should not bounce between CPUs too frequently. + + - plus additional scheduler features: RT scheduling, CPU binding. + +and the goal is also to add a few new things: + + - fully O(1) scheduling. Are you tired of the recalculation loop + blowing the L1 cache away every now and then? 
Do you think the goodness + loop is taking a bit too long to finish if there are lots of runnable + processes? This new scheduler takes no prisoners: wakeup(), schedule(), + the timer interrupt are all O(1) algorithms. There is no recalculation + loop. There is no goodness loop either. + + - 'perfect' SMP scalability. With the new scheduler there is no 'big' + runqueue_lock anymore - it's all per-CPU runqueues and locks - two + tasks on two separate CPUs can wake up, schedule and context-switch + completely in parallel, without any interlocking. All + scheduling-relevant data is structured for maximum scalability. + + - better SMP affinity. The old scheduler has a particular weakness that + causes the random bouncing of tasks between CPUs if/when higher + priority/interactive tasks, this was observed and reported by many + people. The reason is that the timeslice recalculation loop first needs + every currently running task to consume its timeslice. But when this + happens on eg. an 8-way system, then this property starves an + increasing number of CPUs from executing any process. Once the last + task that has a timeslice left has finished using up that timeslice, + the recalculation loop is triggered and other CPUs can start executing + tasks again - after having idled around for a number of timer ticks. + The more CPUs, the worse this effect. + + Furthermore, this same effect causes the bouncing effect as well: + whenever there is such a 'timeslice squeeze' of the global runqueue, + idle processors start executing tasks which are not affine to that CPU. + (because the affine tasks have finished off their timeslices already.) + + The new scheduler solves this problem by distributing timeslices on a + per-CPU basis, without having any global synchronization or + recalculation. + + - batch scheduling. A significant proportion of computing-intensive tasks + benefit from batch-scheduling, where timeslices are long and processes + are roundrobin scheduled. The new scheduler does such batch-scheduling + of the lowest priority tasks - so nice +19 jobs will get + 'batch-scheduled' automatically. With this scheduler, nice +19 jobs are + in essence SCHED_IDLE, from an interactiveness point of view. + + - handle extreme loads more smoothly, without breakdown and scheduling + storms. + + - O(1) RT scheduling. For those RT folks who are paranoid about the + O(nr_running) property of the goodness loop and the recalculation loop. + + - run fork()ed children before the parent. Andrea has pointed out the + advantages of this a few months ago, but patches for this feature + do not work with the old scheduler as well as they should, + because idle processes often steal the new child before the fork()ing + CPU gets to execute it. + + +Design +====== + +the core of the new scheduler are the following mechanizms: + + - *two*, priority-ordered 'priority arrays' per CPU. There is an 'active' + array and an 'expired' array. The active array contains all tasks that + are affine to this CPU and have timeslices left. The expired array + contains all tasks which have used up their timeslices - but this array + is kept sorted as well. The active and expired array is not accessed + directly, it's accessed through two pointers in the per-CPU runqueue + structure. If all active tasks are used up then we 'switch' the two + pointers and from now on the ready-to-go (former-) expired array is the + active array - and the empty active array serves as the new collector + for expired tasks. 
+ + - there is a 64-bit bitmap cache for array indices. Finding the highest + priority task is thus a matter of two x86 BSFL bit-search instructions. + +the split-array solution enables us to have an arbitrary number of active +and expired tasks, and the recalculation of timeslices can be done +immediately when the timeslice expires. Because the arrays are always +access through the pointers in the runqueue, switching the two arrays can +be done very quickly. + +this is a hybride priority-list approach coupled with roundrobin +scheduling and the array-switch method of distributing timeslices. + + - there is a per-task 'load estimator'. + +one of the toughest things to get right is good interactive feel during +heavy system load. While playing with various scheduler variants i found +that the best interactive feel is achieved not by 'boosting' interactive +tasks, but by 'punishing' tasks that want to use more CPU time than there +is available. This method is also much easier to do in an O(1) fashion. + +to establish the actual 'load' the task contributes to the system, a +complex-looking but pretty accurate method is used: there is a 4-entry +'history' ringbuffer of the task's activities during the last 4 seconds. +This ringbuffer is operated without much overhead. The entries tell the +scheduler a pretty accurate load-history of the task: has it used up more +CPU time or less during the past N seconds. [the size '4' and the interval +of 4x 1 seconds was found by lots of experimentation - this part is +flexible and can be changed in both directions.] + +the penalty a task gets for generating more load than the CPU can handle +is a priority decrease - there is a maximum amount to this penalty +relative to their static priority, so even fully CPU-bound tasks will +observe each other's priorities, and will share the CPU accordingly. + +the SMP load-balancer can be extended/switched with additional parallel +computing and cache hierarchy concepts: NUMA scheduling, multi-core CPUs +can be supported easily by changing the load-balancer. Right now it's +tuned for my SMP systems. + +i skipped the prev->mm == next->mm advantage - no workload i know of shows +any sensitivity to this. It can be added back by sacrificing O(1) +schedule() [the current and one-lower priority list can be searched for a +that->mm == current->mm condition], but costs a fair number of cycles +during a number of important workloads, so i wanted to avoid this as much +as possible. + +- the SMP idle-task startup code was still racy and the new scheduler +triggered this. So i streamlined the idle-setup code a bit. We do not call +into schedule() before all processors have started up fully and all idle +threads are in place. + +- the patch also cleans up a number of aspects of sched.c - moves code +into other areas of the kernel where it's appropriate, and simplifies +certain code paths and data constructs. As a result, the new scheduler's +code is smaller than the old one. 
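
A rough sketch of the "find the highest priority task" step described above
(not part of the patch, and meant to live in kernel/sched.c where the types
are visible). The helper and field names (sched_find_first_bit(), nr_active,
array->bitmap, array->queue, run_list) are assumptions taken from the O(1)
scheduler sources, not names given in this text.

/* the inner "pick next task" step, with rq->lock already held */
static task_t *pick_next(runqueue_t *rq)
{
        prio_array_t *array = rq->active;
        struct list_head *queue;
        int idx;

        if (!array->nr_active) {
                /* active array exhausted: swap the two array pointers,
                   as described above */
                rq->active = rq->expired;
                rq->expired = array;
                array = rq->active;
        }

        idx = sched_find_first_bit(array->bitmap);  /* the two BSFLs on x86 */
        queue = array->queue + idx;
        return list_entry(queue->next, task_t, run_list);
}
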
+ + Ingo diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/sonypi.txt linux.21pre4-ac2/Documentation/sonypi.txt --- linux.21pre4/Documentation/sonypi.txt 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/Documentation/sonypi.txt 2003-01-30 16:53:21.000000000 +0000 @@ -44,7 +44,7 @@ to /etc/modules.conf file, when the driver is compiled as a module or by adding the following to the kernel command line (in your bootloader): - sonypi=minor[,verbose[,fnkeyinit[,camera[,compat[,mask]]]]] + sonypi=minor[,verbose[,fnkeyinit[,camera[,compat[,mask[,useinput]]]]]] where: @@ -97,6 +97,11 @@ SONYPI_MEYE_MASK 0x0400 SONYPI_MEMORYSTICK_MASK 0x0800 + useinput: if set (which is the default) jogdial events are + forwarded to the input subsystem as mouse wheel + events. + + Module use: ----------- diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/usb/ehci.txt linux.21pre4-ac2/Documentation/usb/ehci.txt --- linux.21pre4/Documentation/usb/ehci.txt 2003-01-29 16:27:25.000000000 +0000 +++ linux.21pre4-ac2/Documentation/usb/ehci.txt 2003-02-04 15:00:43.000000000 +0000 @@ -1,4 +1,4 @@ -26-Apr-2002 +27-Dec-2002 The EHCI driver is used to talk to high speed USB 2.0 devices using USB 2.0-capable host controller hardware. The USB 2.0 standard is @@ -21,11 +21,15 @@ At this writing, this driver has been seen to work with implementations of EHCI from (in alphabetical order): Intel, NEC, Philips, and VIA. +Other EHCI implementations are becoming available from other vendors; +you should expect this driver to work with them too. -At this writing, high speed devices are finally beginning to appear. -While usb-storage devices have been available for some time (working +While usb-storage devices have been available since mid-2001 (working quite speedily on the 2.4 version of this driver), hubs have only -very recently become available. +been available since late 2001, and other kinds of high speed devices +appear to be on hold until more systems come with USB 2.0 built-in. +Such new systems have been available since early 2002, and became much +more typical in the second half of 2002. Note that USB 2.0 support involves more than just EHCI. It requires other changes to the Linux-USB core APIs, including the hub driver, @@ -43,26 +47,29 @@ It's believed to do all the right PCI magic so that I/O works even on systems with interesting DMA mapping issues. -At this writing the driver should comfortably handle all control and bulk -transfers, including requests to USB 1.1 devices through transaction -translators (TTs) in USB 2.0 hubs. However, there some situations where -the hub driver needs to clear TT error state, which it doesn't yet do. - -Interrupt transfer support is newly functional and not yet as robust as -control and bulk traffic. As yet there is no support for split transaction -scheduling for interrupt transfers, which means among other things that -connecting USB 1.1 hubs, keyboards, and mice to USB 2.0 hubs won't work. -Connect them to USB 1.1 hubs, or to a root hub. - -Isochronous (ISO) transfer support is also newly functional. No production -high speed devices are available which would need it (though high quality -webcams are in the works!). Note that split transaction support for ISO +Transfer Types + +At this writing the driver should comfortably handle all control, bulk, +and interrupt transfers, including requests to USB 1.1 devices through +transaction translators (TTs) in USB 2.0 hubs. But you may find bugs. 
+ +High Speed Isochronous (ISO) transfer support is also functional, but +at this writing no Linux drivers have been using that support. + +Full Speed Isochronous transfer support, through transaction translators, +is not yet available. Note that split transaction support for ISO transfers can't share much code with the code for high speed ISO transfers, since EHCI represents these with a different data structure. So for now, -most USB audio and video devices have the same restrictions as hubs, mice, -and keyboards: don't connect them using high speed USB hubs. +most USB audio and video devices can't be connected to high speed buses. + +Driver Behavior -The EHCI root hub code should hand off USB 1.1 devices to its companion +Transfers of all types can be queued. This means that control transfers +from a driver on one interface (or through usbfs) won't interfere with +ones from another driver, and that interrupt transfers can use periods +of one frame without risking data loss due to interrupt processing costs. + +The EHCI root hub code hands off USB 1.1 devices to its companion controller. This driver doesn't need to know anything about those drivers; a OHCI or UHCI driver that works already doesn't need to change just because the EHCI driver is also present. @@ -70,6 +77,11 @@ There are some issues with power management; suspend/resume doesn't behave quite right at the moment. +Also, some shortcuts have been taken with the scheduling periodic +transactions (interrupt and isochronous transfers). These place some +limits on the number of periodic transactions that can be scheduled, +and prevent use of polling intervals of less than one frame. + USE BY @@ -83,10 +95,10 @@ # rmmod ehci-hcd You should also have a driver for a "companion controller", such as -"ohci-hcd", "usb-ohci", "usb-uhci", or "uhci". In case of any trouble -with the EHCI driver, remove its module and then the driver for that -companion controller will take over (at lower speed) all the devices -that were previously handled by the EHCI driver. +"ohci-hcd" or "uhci-hcd". In case of any trouble with the EHCI driver, +remove its module and then the driver for that companion controller will +take over (at lower speed) all the devices that were previously handled +by the EHCI driver. Module parameters (pass to "modprobe") include: @@ -96,9 +108,20 @@ is 6, indicating 2^6 = 64 microframes. This controls how often the EHCI controller can issue interrupts. -The EHCI interrupt handler just acknowledges interrupts and schedules -a tasklet to handle whatever needs handling. That keeps latencies low, -no matter how often interrupts are issued. +If you're using this driver on a 2.5 kernel, and you've enabled USB +debugging support, you'll see three files in the "sysfs" directory for +any EHCI controller: + + "async" dumps the asynchronous schedule, used for control + and bulk transfers. Shows each active qh and the qtds + pending, usually one qtd per urb. (Look at it with + usb-storage doing disk I/O; watch the request queues!) + "periodic" dumps the periodic schedule, used for interrupt + and isochronous transfers. Doesn't show qtds. + "registers" show controller register state, and + +The contents of those files can help identify driver problems. + Device drivers shouldn't care whether they're running over EHCI or not, but they may want to check for "usb_device->speed == USB_SPEED_HIGH". 
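[Editor's illustration] Regarding the speed check mentioned just above: a driver that wants to
size its bulk transfers differently on a high speed link could do something like the following
fragment. The helper name is made up; only the struct usb_device 'speed' field and the
USB_SPEED_HIGH constant from <linux/usb.h> are assumed.

    #include <linux/usb.h>

    /* Hypothetical helper: pick a bulk chunk size based on negotiated speed.
     * High speed bulk packets are 512 bytes (up to 13 per microframe);
     * full speed bulk packets are at most 64 bytes. */
    static unsigned int my_bulk_chunk_size(struct usb_device *udev)
    {
            return (udev->speed == USB_SPEED_HIGH) ? 13 * 512 : 64;
    }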
@@ -107,6 +130,11 @@ Also, some values in device descriptors (such as polling intervals for periodic transfers) use different encodings when operating at high speed. +However, do make a point of testing device drivers through USB 2.0 hubs. +Those hubs report some failures, such as disconnections, differently when +transaction translators are in use; some drivers have been seen to behave +badly when they see different faults than OHCI or UHCI report. + PERFORMANCE @@ -122,13 +150,18 @@ and at most 13 of those fit into one USB 2.0 microframe. Eight USB 2.0 microframes fit in a USB 1.1 frame; a microframe is 1 msec/8 = 125 usec. +So more than 50 MByte/sec is available for bulk transfers, when both +hardware and device driver software allow it. Periodic transfer modes +(isochronous and interrupt) allow the larger packet sizes which let you +approach the quoted 480 MBit/sec transfer rate. + Hardware Performance At this writing, individual USB 2.0 devices tend to max out at around 20 MByte/sec transfer rates. This is of course subject to change; and some devices now go faster, while others go slower. -The NEC implementation of EHCI seems to have a hardware bottleneck +The first NEC implementation of EHCI seems to have a hardware bottleneck at around 28 MByte/sec aggregate transfer rate. While this is clearly enough for a single device at 20 MByte/sec, putting three such devices onto one bus does not get you 60 MByte/sec. The issue appears to be @@ -136,9 +169,11 @@ so that it's only trying six (or maybe seven) USB transactions each microframe rather than thirteen. (Seems like a reasonable trade off for a product that beat all the others to market by over a year!) + It's expected that newer implementations will better this, throwing more silicon real estate at the problem so that new motherboard chip -sets will get closer to that 60 MByte/sec target. +sets will get closer to that 60 MByte/sec target. That includes an +updated implementation from NEC, as well as other vendors' silicon. There's a minimum latency of one microframe (125 usec) for the host to receive interrupts from the EHCI controller indicating completion @@ -161,10 +196,16 @@ sequence of 128 KB chunks would waste a lot less. But rather than depending on such large I/O buffers to make synchronous -I/O be efficient, it's better to just queue all several (bulk) requests +I/O be efficient, it's better to just queue up several (bulk) requests to the HC, and wait for them all to complete (or be canceled on error). Such URB queuing should work with all the USB 1.1 HC drivers too. +In the Linux 2.5 kernels, new usb_sg_*() api calls have been defined; they +queue all the buffers from a scatterlist. They also use scatterlist DMA +mapping (which might apply an IOMMU) and IRQ reduction, all of which will +help make high speed transfers run as fast as they can. + + TBD: Interrupt and ISO transfer performance issues. Those periodic transfers are fully scheduled, so the main issue is likely to be how to trigger "high bandwidth" modes. 
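[Editor's illustration] A quick arithmetic check of the bulk throughput figure quoted in the
performance section above (512-byte packets, at most 13 per microframe, 8 microframes per 1 ms
frame). This small stand-alone program is not part of the patch.

    #include <stdio.h>

    int main(void)
    {
            const double packet_bytes   = 512.0;  /* high speed bulk packet size */
            const double packets_per_uf = 13.0;   /* max bulk packets per microframe */
            const double uframes_per_s  = 8000.0; /* 8 microframes per 1 ms frame */
            double bytes_per_s = packet_bytes * packets_per_uf * uframes_per_s;

            /* prints about 53.2 MByte/sec, i.e. "more than 50 MByte/sec" */
            printf("theoretical bulk throughput: %.1f MByte/sec\n",
                   bytes_per_s / 1e6);
            return 0;
    }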
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Documentation/vm/overcommit-accounting linux.21pre4-ac2/Documentation/vm/overcommit-accounting --- linux.21pre4/Documentation/vm/overcommit-accounting 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/Documentation/vm/overcommit-accounting 2003-01-06 15:38:40.000000000 +0000 @@ -0,0 +1,70 @@ +* This describes the overcommit management facility in the latest kernel + tree (FIXME: actually it also describes the stuff that isnt yet done) + +The Linux kernel supports four overcommit handling modes + +0 - Heuristic overcommit handling. Obvious overcommits of + address space are refused. Used for a typical system. It + ensures a seriously wild allocation fails while allowing + overcommit to reduce swap usage + +1 - No overcommit handling. Appropriate for some scientific + applications + +2 - (NEW) strict overcommit. The total address space commit + for the system is not permitted to exceed swap + half ram. + In almost all situations this means a process will not be + killed while accessing pages but only by malloc failures + that are reported back by the kernel mmap/brk code. + +3 - (NEW) paranoid overcommit The total address space commit + for the system is not permitted to exceed swap. The machine + will never kill a process accessing pages it has mapped + except due to a bug (ie report it!) + +Gotchas +------- + +The C language stack growth does an implicit mremap. If you want absolute +guarantees and run close to the edge you MUST mmap your stack for the +largest size you think you will need. For typical stack usage is does +not matter much but its a corner case if you really really care + +In modes 2 and 3 the MAP_NORESERVE flag is ignored. + + +How It Works +------------ + +The overcommit is based on the following rules + +For a file backed map + SHARED or READ-only - 0 cost (the file is the map not swap) + PRIVATE WRITABLE - size of mapping per instance + +For an anonymous or /dev/zero map + SHARED - size of mapping + PRIVATE READ-only - 0 cost (but of little use) + PRIVATE WRITABLE - size of mapping per instance + +Additional accounting + Pages made writable copies by mmap + shmfs memory drawn from the same pool + +Status +------ + +o We account mmap memory mappings +o We account mprotect changes in commit +o We account mremap changes in size +o We account brk +o We account munmap +o We report the commit status in /proc +o Account and check on fork +o Review stack handling/building on exec +o SHMfs accounting +o Implement actual limit enforcement + +To Do +----- +o Account ptrace pages (this is hard) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/atm/atmdev_init.c linux.21pre4-ac2/drivers/atm/atmdev_init.c --- linux.21pre4/drivers/atm/atmdev_init.c 2003-01-29 16:27:00.000000000 +0000 +++ linux.21pre4-ac2/drivers/atm/atmdev_init.c 2003-01-10 16:19:21.000000000 +0000 @@ -19,9 +19,6 @@ #ifdef CONFIG_ATM_HORIZON extern int hrz_detect(void); #endif -#ifdef CONFIG_ATM_IA -extern int ia_detect(void); -#endif #ifdef CONFIG_ATM_FORE200E extern int fore200e_detect(void); #endif @@ -53,9 +50,6 @@ #ifdef CONFIG_ATM_HORIZON devs += hrz_detect(); #endif -#ifdef CONFIG_ATM_IA - devs += ia_detect(); -#endif #ifdef CONFIG_ATM_FORE200E devs += fore200e_detect(); #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/agp/agpgart_be.c linux.21pre4-ac2/drivers/char/agp/agpgart_be.c --- linux.21pre4/drivers/char/agp/agpgart_be.c 2003-01-29 
17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/agp/agpgart_be.c 2003-01-06 18:22:56.000000000 +0000 @@ -577,7 +577,7 @@ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); - agp_bridge.gatt_table_real = (unsigned long *) table; + agp_bridge.gatt_table_real = (u32 *) table; agp_gatt_table = (void *)table; #ifdef CONFIG_X86 err = change_page_attr(virt_to_page(table), 1< - + (c) Copyright 2002,2003 Andi Kleen, SuSE Labs + derived from Hardware driver for Intel i810 Random Number Generator (RNG) @@ -238,13 +239,14 @@ */ static struct pci_device_id rng_pci_tbl[] __initdata = { { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, }, + { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, }; MODULE_DEVICE_TABLE (pci, rng_pci_tbl); -MODULE_AUTHOR("Alan Cox, Jeff Garzik, Philipp Rumpf, Matt Sottek"); -MODULE_DESCRIPTION("AMD 768 Random Number Generator (RNG) driver"); +MODULE_AUTHOR("Alan Cox, Jeff Garzik, Philipp Rumpf, Matt Sottek, Andi Kleen"); +MODULE_DESCRIPTION("AMD 768/8111 Random Number Generator (RNG) driver"); MODULE_LICENSE("GPL"); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/cd1865/cd1865.c linux.21pre4-ac2/drivers/char/cd1865/cd1865.c --- linux.21pre4/drivers/char/cd1865/cd1865.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/cd1865/cd1865.c 2003-01-11 19:09:03.000000000 +0000 @@ -0,0 +1,2913 @@ +/* -*- linux-c -*- */ +/* + * This code was modified from + * specialix.c -- specialix IO8+ multiport serial driver. + * + * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl) + * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) + * Modifications (C) 2002 Telford Tools, Inc. (martillo@telfordtools.com) + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#define VERSION "2.11" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cdsiolx.h" +#include "../cd1865.h" /* will move all files up one level */ +#include "siolx.h" +#include "plx9060.h" + +#define SIOLX_NORMAL_MAJOR 254 /* One is needed */ +#define SIOLX_ID 0x10 +#define CD186x_MSMR 0x61 /* modem/timer iack */ +#define CD186x_TSMR 0x62 /* tx iack */ +#define CD186x_RSMR 0x63 /* rx iack */ + +/* Configurable options: */ + +/* Am I paranoid or not ? ;-) */ +#define SIOLX_PARANOIA_CHECK + +/* Do I trust the IRQ from the card? (enabeling it doesn't seem to help) + When the IRQ routine leaves the chip in a state that is keeps on + requiring attention, the timer doesn't help either. */ +#undef SIOLX_TIMER +/* + * The following defines are mostly for testing purposes. But if you need + * some nice reporting in your syslog, you can define them also. 
+ */ +#undef SIOLX_REPORT_FIFO +#undef SIOLX_REPORT_OVERRUN + +#ifdef CONFIG_SIOLX_RTSCTS /* may need to set this */ +#define SIOLX_CRTSCTS(bla) 1 +#else +#define SIOLX_CRTSCTS(tty) C_CRTSCTS(tty) +#endif + +/* Used to be outb (0xff, 0x80); */ +#define short_pause() udelay (1) + +#define SIOLX_LEGAL_FLAGS \ + (ASYNC_HUP_NOTIFY | ASYNC_SAK | ASYNC_SPLIT_TERMIOS | \ + ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \ + ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP) + +#ifndef MIN +#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#endif + +DECLARE_TASK_QUEUE(tq_siolx); + +#undef RS_EVENT_WRITE_WAKEUP +#define RS_EVENT_WRITE_WAKEUP 0 + +#define SIOLX_TYPE_NORMAL 1 +#define SIOLX_TYPE_CALLOUT 2 + +#define BD_8000P 1 +#define BD_16000P 2 +#define BD_8000C 3 +#define BD_16000C 4 +#define BD_MAX BD_16000C + +static struct siolx_board *SiolxIrqRoot[SIOLX_NUMINTS]; + +static char *sio16_board_type[] = +{ + "unknown", + " 8000P ", + "16000P ", + " 8000C ", + "16000C " +}; +static struct tty_driver siolx_driver, siolx_callout_driver; +static int siolx_refcount; +static unsigned char * tmp_buf; +static DECLARE_MUTEX(tmp_buf_sem); +static unsigned long baud_table[] = +{ + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, + 9600, 19200, 38400, 57600, 115200, 0, +}; +static int siolx_debug = 0; /* turns on lots of */ + /* debugging messages*/ +static int siolx_major = SIOLX_NORMAL_MAJOR; +#ifdef MODULE +static int siolx_minorstart = 256; +#endif +static int siolx_vendor_id = PCI_VENDOR_ID_PLX; +static int siolx_device_id = PCI_DEVICE_ID_PLX_9060SD; +static int siolx_subsystem_vendor = AURASUBSYSTEM_VENDOR_ID; +static int siolx_subsystem_pci_device = AURASUBSYSTEM_MPASYNCPCI; +static int siolx_subsystem_cpci_device = AURASUBSYSTEM_MPASYNCcPCI; +static int siolx_bhindex = SIOLX_BH; /* if this softinterrupt slot is filled */ + +MODULE_PARM(siolx_vendor_id, "i"); +MODULE_PARM(siolx_device_id, "i"); +#ifdef MODULE +MODULE_PARM(siolx_minorstart, "i"); +#endif +MODULE_PARM(siolx_major, "i"); +MODULE_PARM(siolx_subsystem_vendor, "i"); +MODULE_PARM(siolx_subsystem_pci_device, "i"); +MODULE_PARM(siolx_subsystem_cpci_device, "i"); +MODULE_PARM(siolx_bhindex, "i"); + +static struct siolx_board *siolx_board_root; +static struct siolx_board *siolx_board_last; +static struct siolx_port *siolx_port_root; +static struct siolx_port *siolx_port_last; +static unsigned int NumSiolxPorts; +static struct tty_struct **siolx_table; /* make dynamic */ +static struct termios **siolx_termios; +static struct termios **siolx_termios_locked; +static int siolx_driver_registered; +static int siolx_callout_driver_registered; + +#ifdef SIOLX_TIMER +static struct timer_list missed_irq_timer; +static void siolx_interrupt(int irq, void * dev_id, struct pt_regs * regs); +#endif + +extern struct tty_driver *get_tty_driver(kdev_t device); + +static inline int port_No_by_chip (struct siolx_port const * port) +{ + return SIOLX_PORT(port->boardport); +} + +/* Describe the current board and port configuration */ + +static int siolx_read_proc(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct siolx_port *port = siolx_port_root; + off_t begin = 0; + int len = 0; + unsigned int typeno; + char *revision = "$Revision: 1.11 $"; + + len += sprintf(page, "SIOLX Version %s. 
%s\n", VERSION, revision); + len += sprintf(page+len, "TTY MAJOR = %d, CUA MAJOR = %d.\n", + siolx_driver.major, siolx_callout_driver.major); + + for (port = siolx_port_root; port != NULL; port = port->next_by_global_list) + { + typeno = port->board->boardtype; + if(typeno > BD_MAX) + { + typeno = 0; + } + len += sprintf(page+len, + "%3.3d: bd %2.2d: %s: ch %d: pt %2.2d/%d: tp %4.4d%c: bs %2.2d: sl %2.2d: ir %2.2d: fl %c%c%c%c%c\n", + siolx_driver.minor_start + port->driverport, + port->board->boardnumber, + sio16_board_type[typeno], + port->board->chipnumber, + port->boardport, + port_No_by_chip(port), /* port relative to chip */ + port->board->chiptype, + port->board->chiprev, + port->board->pdev.bus->number, + PCI_SLOT(port->board->pdev.devfn), + port->board->irq, + (port->flags & ASYNC_INITIALIZED) ? 'I' : ' ', + (port->flags & ASYNC_CALLOUT_ACTIVE) ? 'D' : ' ', + (port->flags & ASYNC_NORMAL_ACTIVE) ? 'T' : ' ', + (port->flags & ASYNC_CLOSING) ? 'C' : ' ', + port->board->reario ? 'R' : ' '); + if (len+begin > off+count) + { + goto done; + } + if (len+begin < off) + { + begin += len; + len = 0; + } + } + *eof = 1; + done: + if (off >= len+begin) + { + return 0; + } + *start = page + (off-begin); + return ((count < begin+len-off) ? count : begin+len-off); +} + +#ifndef MODULE +static int GetMinorStart(void) /* minor start can be determined on fly when driver linked to kernel */ +{ + struct tty_driver *ttydriver; + int minor_start = 0; + kdev_t device; + + device = MKDEV(siolx_major, minor_start); + while(ttydriver = get_tty_driver(device), ttydriver != NULL) + { + minor_start += ttydriver->num; + device = MKDEV(TTY_MAJOR, minor_start); + } + return minor_start; + +} +#endif + +/* only once per board chain */ +void SiolxResetBoard(struct siolx_board * bp, struct pci_dev *pdev) +{ + register unsigned int regvalue; + unsigned char savedvalue; + /* + * Yuch. Here's the deal with the reset bits in the + * ECNTL register of the 9060SD. + * + * It appears that LCLRST resets the PLX local configuration + * registers (not the PCI configuration registers) to their + * default values. We need to use LCLRST because it + * is the command (I think) that pulls the local reset + * line on the local bus side of the 9060SD. + * + * Unfortunately, by resetting the PLX local configuration + * registers, we can't use the damn board. So we must + * reinitialize them. The easiest way to do that is to run + * the LDREG command. Unfortunately, it has the side effect + * of reinitializing the PCI configuration registers. It seems, + * however that only the value stowed in ILINE gets choked; all + * of the others seem to be properly preserved. + * + * So, what the code does now is to get a copy of ILINE by + * hand, and then restore it after reloading the registers. 
+ */ + + bp->pdev = *pdev; + bp->plx_vaddr = (unsigned long) ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if(bp->plx_vaddr) + { + regvalue = readl(bp->plx_vaddr + PLX_ECNTL); + regvalue &= ~PLX_ECNTLLDREG; + regvalue |= PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &savedvalue); + regvalue |= PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, savedvalue); + regvalue |= PLX_ECNTLINITSTAT; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + writel(0, bp->plx_vaddr + PLX_ICSR); + } +} + +void SiolxShutdownBoard(struct siolx_board * bp) +{ + register unsigned int regvalue; + unsigned char savedvalue; + struct pci_dev *pdev; + + if(bp->chipnumber == 0) /* only shutdown first in a chain */ + { + pdev = &bp->pdev; + + writel(0, bp->plx_vaddr + PLX_ICSR); + regvalue = readl(bp->plx_vaddr + PLX_ECNTL); + regvalue &= ~PLX_ECNTLLDREG; + regvalue |= PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLCLRST; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &savedvalue); + regvalue |= PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + udelay(200); + regvalue &= ~PLX_ECNTLLDREG; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, savedvalue); + regvalue |= PLX_ECNTLINITSTAT; + writel(regvalue, bp->plx_vaddr + PLX_ECNTL); + writel(0, bp->plx_vaddr + PLX_ICSR); + iounmap((void*)bp->plx_vaddr); + bp->plx_vaddr = 0; + } +} + +static inline int siolx_paranoia_check(struct siolx_port const * port, + kdev_t device, const char *routine) +{ +#ifdef SIOLX_PARANOIA_CHECK + static const char *badmagic = + KERN_ERR "siolx: Warning: bad siolx port magic number for device %s in %s\n"; + static const char *badinfo = + KERN_ERR "siolx: Warning: null siolx port for device %s in %s\n"; + + if (!port) + { + printk(badinfo, kdevname(device), routine); + return 1; + } + if (port->magic != SIOLX_MAGIC) + { + printk(badmagic, kdevname(device), routine); + return 1; + } +#endif + return 0; +} + + +/* + * + * Service functions for siolx Aurora Asynchronous Adapter driver. 
+ * + */ + +/* Get board number from pointer */ +static inline int board_No (struct siolx_board * bp) +{ + return bp->boardnumber; /* note same for all chips/boards in a chain */ +} + + +/* Get port number from pointer */ +static inline int port_No (struct siolx_port const * port) +{ + return port->driverport; /* offset from minor start */ +} + +/* Get pointer to board from pointer to port */ +static inline struct siolx_board * port_Board(struct siolx_port const * port) +{ + return port->board; /* same for ports on both chips on a board */ +} + + +/* Input Byte from CL CD186x register */ +static inline unsigned char siolx_in(struct siolx_board * bp, unsigned short reg) +{ + return readb (bp->base + reg); +} + + +/* Output Byte to CL CD186x register */ +static inline void siolx_out(struct siolx_board * bp, unsigned short reg, + unsigned char val) +{ + writeb(val, bp->base + reg); +} + + +/* Wait for Channel Command Register ready */ +static int siolx_wait_CCR(struct siolx_board * bp) +{ + unsigned long delay; + + for (delay = SIOLX_CCR_TIMEOUT; delay; delay--) + { + udelay(1); + if (!siolx_in(bp, CD186x_CCR)) + { + return 0; + } + } + printk(KERN_ERR "siolx:board %d: timeout waiting for CCR.\n", board_No(bp)); + return -1; +} + +/* Wait for ready */ +static int siolx_wait_GIVR(struct siolx_board * bp) +{ + unsigned long delay; + + for (delay = SIOLX_CCR_TIMEOUT; delay; delay--) + { + udelay(1); + if (siolx_in(bp, CD186x_GIVR) == (unsigned char) 0xff) + { + return 0; + } + } + printk(KERN_ERR "siolx: board %d: timeout waiting for GIVR.\n", board_No(bp)); + return -1; +} + +static inline void siolx_release_io_range(struct siolx_board * bp) +{ + if((bp->chipnumber == 0) && bp->vaddr) /* only release from first board in a chain */ + { + iounmap((void*)bp->vaddr); + bp->vaddr = 0; + } +} + +/* Must be called with enabled interrupts */ + +static inline void siolx_long_delay(unsigned long delay) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(delay); +} + +/* Reset and setup CD186x chip */ +static int siolx_init_CD186x(struct siolx_board * bp) +{ + unsigned long flags; + int scaler; + int rv = 1; + int rev; + int chip; + + save_flags(flags); /* not sure of need to turn off ints */ + cli(); + if(siolx_wait_CCR(bp)) + { + restore_flags(flags); + return 0; /* Wait for CCR ready */ + } + siolx_out(bp, CD186x_CAR, 0); + siolx_out(bp, CD186x_GIVR, 0); + siolx_out(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */ + if(siolx_wait_GIVR(bp)) + { + restore_flags(flags); + return 0; + } + sti(); + siolx_long_delay(HZ/20); /* Delay 0.05 sec */ + cli(); + siolx_out(bp, CD186x_GIVR, SIOLX_ID | (bp->chipnumber ? 0x80 : 0)); /* Set ID for this chip */ +#if 0 + siolx_out(bp, CD186x_GICR, 0); /* Clear all bits */ +#endif + scaler = SIOLX_OSCFREQ/1000; + siolx_out(bp, CD186x_PPRH, scaler >> 8); + siolx_out(bp, CD186x_PPRL, scaler & 0xff); + + /* Chip revcode pkgtype + GFRCR SRCR bit 7 + CD180 rev B 0x81 0 + CD180 rev C 0x82 0 + CD1864 rev A 0x82 1 + CD1865 rev A 0x83 1 -- Do not use!!! Does not work. + CD1865 rev B 0x84 1 + -- Thanks to Gwen Wang, Cirrus Logic. 
+ */ + + switch (siolx_in(bp, CD186x_GFRCR)) + { + case 0x82: + chip = 1864; + rev='A'; + break; + case 0x83: + chip = 1865; + rev='A'; + break; + case 0x84: + chip = 1865; + rev='B'; + break; + case 0x85: + chip = 1865; + rev='C'; + break; /* Does not exist at this time */ + default: + chip=-1; + rev='x'; + break; + } + +#if SIOLX_DEBUG > 2 + printk (KERN_DEBUG " GFCR = 0x%02x\n", siolx_in(bp, CD186x_GFRCR) ); +#endif + + siolx_out(bp, CD186x_MSMR, CD186x_MRAR); /* load up match regs with address regs */ + siolx_out(bp, CD186x_TSMR, CD186x_TRAR); + siolx_out(bp, CD186x_RSMR, CD186x_RRAR); + +#if 0 + DEBUGPRINT((KERN_ALERT "match reg values are msmr %x, tsmr %x, rsmr %x.\n", + siolx_in(bp, CD186x_MSMR), + siolx_in(bp, CD186x_TSMR), + siolx_in(bp, CD186x_RSMR))); +#endif + + siolx_out(bp, CD186x_SRCR, SRCR_AUTOPRI | SRCR_GLOBPRI | SRCR_REGACKEN); + /* Setting up prescaler. We need 4 ticks per 1 ms */ + + printk(KERN_INFO"siolx: CD%4.4d%c detected at 0x%lx, IRQ %d, on Aurora asynchronous adapter board %d, chip number %d.\n", + chip, rev, bp->base, bp->irq, board_No(bp), bp->chipnumber); + + bp->chiptype = chip; + bp->chiprev = rev; + + restore_flags(flags); + return rv; +} + + +#ifdef SIOLX_TIMER +void missed_irq (unsigned long data) +{ + if (siolx_in ((struct siolx_board *)data, CD186x_SRSR) & + (SRSR_RREQint | + SRSR_TREQint | + SRSR_MREQint)) + { + printk (KERN_INFO "Missed interrupt... Calling int from timer. \n"); + siolx_interrupt (((struct siolx_board *)data)->irq, + NULL, NULL); + } + missed_irq_timer.expires = jiffies + HZ; + add_timer (&missed_irq_timer); +} +#endif + +/* Main probing routine, also sets irq. */ +static int siolx_probe(struct siolx_board *bp) +{ + unsigned char val1, val2; + + /* Are the I/O ports here ? */ + siolx_out(bp, CD186x_PPRL, 0x5a); + short_pause (); + val1 = siolx_in(bp, CD186x_PPRL); + + siolx_out(bp, CD186x_PPRL, 0xa5); + short_pause (); + val2 = siolx_in(bp, CD186x_PPRL); + + if ((val1 != 0x5a) || (val2 != 0xa5)) + { + printk(KERN_INFO + "siolx: cd serial chip not found at base %ld.\n", + bp->base); + return 1; + } + + /* Reset CD186x */ + if (!siolx_init_CD186x(bp)) + { + return -EIO; + } + +#ifdef SIOLX_TIMER + init_timer (&missed_irq_timer); + missed_irq_timer.function = missed_irq; + missed_irq_timer.data = (unsigned long) bp; + missed_irq_timer.expires = jiffies + HZ; + add_timer (&missed_irq_timer); +#endif + return 0; +} + +/* + * + * Interrupt processing routines. + * */ + +static inline void siolx_mark_event(struct siolx_port * port, int event) +{ + /* + * I'm not quite happy with current scheme all serial + * drivers use their own BH routine. + * It seems this easily can be done with one BH routine + * serving for all serial drivers. + * For now I must introduce another one - SIOLX_BH. + * Still hope this will be changed in near future. + * -- Dmitry. + */ + /* I use a module parameter that can be set at module + * load time so that this driver can be downloaded into + * a kernel where the value of SIOLX_BX has been allocated + * to something else. 
This kludge was not necessary + * in the ASLX driver because AURORA_BH had already + * been allocated for the sparc and there was no + * similar driver for x86 while the ASLX driver probably + * will not work for the SPARC and is not guaranteed to + * do so (at some point I should clean this situation up) -- Joachim*/ + set_bit(event, &port->event); + queue_task(&port->tqueue, &tq_siolx); + mark_bh(siolx_bhindex); +} + +static inline struct siolx_port * siolx_get_port(struct siolx_board * bp, + unsigned char const * what) +{ + unsigned char channel; + struct siolx_port * port; + + channel = siolx_in(bp, CD186x_GICR) >> GICR_CHAN_OFF; + if (channel < CD186x_NCH) + { + port = bp->portlist; + while(port) + { + if(channel == 0) + { + break; + } + port = port->next_by_board; + --channel; + } + + if(port && (port->flags & ASYNC_INITIALIZED)) /* port should be opened */ + { + return port; + } + } + printk(KERN_INFO "sx%d: %s interrupt from invalid port %d\n", + board_No(bp), what, channel); + return NULL; +} + + +static inline void siolx_receive_exc(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char status; + unsigned char ch; + + if (!(port = siolx_get_port(bp, "Receive"))) + return; + + tty = port->tty; + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + { + printk(KERN_INFO "sx%d: port %d: Working around flip buffer overflow.\n", + board_No(bp), port_No(port)); + return; + } + +#ifdef SIOLX_REPORT_OVERRUN + status = siolx_in(bp, CD186x_RCSR); + if (status & RCSR_OE) + { + port->overrun++; +#if SIOLX_DEBUG + printk(KERN_DEBUG "sx%d: port %d: Overrun. Total %ld overruns.\n", + board_No(bp), port_No(port), port->overrun); +#endif + } + status &= port->mark_mask; +#else + status = siolx_in(bp, CD186x_RCSR) & port->mark_mask; +#endif + ch = siolx_in(bp, CD186x_RDR); + if (!status) + { + return; + } + if (status & RCSR_TOUT) + { + printk(KERN_INFO "siolx: board %d: chip %d: port %d: Receiver timeout. Hardware problems ?\n", + board_No(bp), bp->chipnumber, port_No(port)); + return; + + } + else if (status & RCSR_BREAK) + { +#ifdef SIOLX_DEBUG + printk(KERN_DEBUG "siolx: board %d: chip %d: port %d: Handling break...\n", + board_No(bp), bp->chipnumber, port_No(port)); +#endif + *tty->flip.flag_buf_ptr++ = TTY_BREAK; + if (port->flags & ASYNC_SAK) + { + do_SAK(tty); + } + + } + else if (status & RCSR_PE) + { + *tty->flip.flag_buf_ptr++ = TTY_PARITY; + } + else if (status & RCSR_FE) + { + *tty->flip.flag_buf_ptr++ = TTY_FRAME; + } + + else if (status & RCSR_OE) + { + *tty->flip.flag_buf_ptr++ = TTY_OVERRUN; + } + + else + { + *tty->flip.flag_buf_ptr++ = 0; + } + + *tty->flip.char_buf_ptr++ = ch; + tty->flip.count++; + queue_task(&tty->flip.tqueue, &tq_timer); +} + + +static inline void siolx_receive(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char count; + + if (!(port = siolx_get_port(bp, "Receive"))) + return; + + tty = port->tty; + + count = siolx_in(bp, CD186x_RDCR); + +#ifdef SIOLX_REPORT_FIFO + port->hits[count > 8 ? 
9 : count]++; +#endif + + while (count--) + { + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + { + printk(KERN_INFO "siolx: board %d: chip %d: port %d: Working around flip buffer overflow.\n", + board_No(bp), bp->chipnumber, port_No(port)); + break; + } + *tty->flip.char_buf_ptr++ = siolx_in(bp, CD186x_RDR); + *tty->flip.flag_buf_ptr++ = 0; + tty->flip.count++; + } + queue_task(&tty->flip.tqueue, &tq_timer); +} + +static inline void siolx_transmit(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char count; + + if (!(port = siolx_get_port(bp, "Transmit"))) + return; + + tty = port->tty; + + if(port->IER & IER_TXEMPTY) + { + /* FIFO drained */ +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXEMPTY; + siolx_out(bp, CD186x_IER, port->IER); + return; + } + + if(((port->xmit_cnt <= 0) && !port->break_length) || + tty->stopped || tty->hw_stopped) + { +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_IER, port->IER); + return; + } + + if (port->break_length) + { + if (port->break_length > 0) + { + if (port->COR2 & COR2_ETC) + { + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_SBRK); + port->COR2 &= ~COR2_ETC; + } + count = MIN(port->break_length, 0xff); + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_DELAY); + siolx_out(bp, CD186x_TDR, count); + if (!(port->break_length -= count)) + { + port->break_length--; + } + } + else + { + siolx_out(bp, CD186x_TDR, CD186x_C_ESC); + siolx_out(bp, CD186x_TDR, CD186x_C_EBRK); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG2); + port->break_length = 0; + } + return; + } + + count = CD186x_NFIFO; + do + { + siolx_out(bp, CD186x_TDR, port->xmit_buf[port->xmit_tail++]); + port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE-1); + if (--port->xmit_cnt <= 0) + { + break; + } + } while (--count > 0); + + if (port->xmit_cnt <= 0) + { +#if 0 + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); +#endif + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_IER, port->IER); + } + if (port->xmit_cnt <= port->wakeup_chars) + { + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } +} + + +static inline void siolx_check_modem(struct siolx_board * bp) +{ + struct siolx_port *port; + struct tty_struct *tty; + unsigned char mcr; + +#ifdef SIOLX_DEBUG + printk (KERN_DEBUG "Modem intr. "); +#endif + if (!(port = siolx_get_port(bp, "Modem"))) + { + return; + } + + tty = port->tty; + + mcr = siolx_in(bp, CD186x_MCR); + DEBUGPRINT((KERN_ALERT "mcr = %02x.\n", mcr)); + + if ((mcr & MCR_CDCHG)) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT((KERN_DEBUG "CD just changed... 
")); +#endif + if (siolx_in(bp, CD186x_MSVR) & MSVR_CD) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(( "Waking up guys in open.\n")); +#endif + wake_up_interruptible(&port->open_wait); /* note no linefeed in previous print */ + } + else if (!((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_CALLOUT_NOHUP))) + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(( "Sending HUP.\n")); /* note no linefeed in previous print */ +#endif + MOD_INC_USE_COUNT; + if (schedule_task(&port->tqueue_hangup) == 0) + { + MOD_DEC_USE_COUNT; + } + } + else + { +#ifdef SIOLX_DEBUG + DEBUGPRINT(("Don't need to send HUP.\n")); /* note no linefeed in previous print */ +#endif + } + } + +#ifdef SIOLX_BRAIN_DAMAGED_CTS + if (mcr & MCR_CTSCHG) + { + if (siolx_in(bp, CD186x_MSVR) & MSVR_CTS) + { + tty->hw_stopped = 0; + port->IER |= IER_TXRDY; + if (port->xmit_cnt <= port->wakeup_chars) + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } + else + { + tty->hw_stopped = 1; + port->IER &= ~IER_TXRDY; + } + siolx_out(bp, CD186x_IER, port->IER); + } + if (mcr & MCR_DSSXHG) + { + if (siolx_in(bp, CD186x_MSVR) & MSVR_DSR) + { + tty->hw_stopped = 0; + port->IER |= IER_TXRDY; + if (port->xmit_cnt <= port->wakeup_chars) + { + siolx_mark_event(port, RS_EVENT_WRITE_WAKEUP); + } + } + else + { + tty->hw_stopped = 1; + port->IER &= ~IER_TXRDY; + } + siolx_out(bp, CD186x_IER, port->IER); + } +#endif /* SIOLX_BRAIN_DAMAGED_CTS */ + + /* Clear change bits */ + siolx_out(bp, CD186x_MCR, 0); +} + +/* The main interrupt processing routine */ +static void siolx_interrupt(int irq, void * dev_id, struct pt_regs * regs) +{ + unsigned char status; + unsigned char rcsr; + struct siolx_board *bp; + + if((irq < 0) || (irq >= SIOLX_NUMINTS)) + { + printk(KERN_ALERT "siolx: bad interrupt value %i.\n", irq); + return; + } + /* walk through all the cards on the interrupt that occurred. */ + for(bp = SiolxIrqRoot[irq]; bp != NULL; bp = bp->next_by_interrupt) + + { + while((readl(bp->intstatus) & PLX_ICSRINTACTIVE) != 0) /* work on on board */ + { + status = siolx_in(bp, CD186x_SRSR); + + if(status & SRSR_RREQint) + { + siolx_in(bp, CD186x_RRAR); + rcsr = siolx_in(bp, CD186x_RCSR); + if(rcsr == 0) + { + siolx_receive(bp); + } + else + { + siolx_receive_exc(bp); + } + } + else if (status & SRSR_TREQint) + { + siolx_in(bp, CD186x_TRAR); + siolx_transmit(bp); + } + else if (status & SRSR_MREQint) + { + siolx_in(bp, CD186x_MRAR); + siolx_check_modem(bp); + } + siolx_out(bp, CD186x_EOIR, 1); /* acknowledge the interrupt */ + bp = bp->next_by_chain; /* go to next chip on card -- maybe this one */ + } /* it does not matter if bp changes all in a chain have same next by interrupt */ + } +} + + +/* + * Setting up port characteristics. + * Must be called with disabled interrupts + */ +static void siolx_change_speed(struct siolx_board *bp, struct siolx_port *port) +{ + struct tty_struct *tty; + unsigned long baud; + long tmp; + unsigned char cor1 = 0, cor3 = 0; + unsigned char mcor1 = 0, mcor2 = 0; + static int again; + + tty = port->tty; + + if(!tty || !tty->termios) + { + return; + } + + port->IER = 0; + port->COR2 = 0; + /* Select port on the board */ + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + + /* The Siolx board doens't implement the RTS lines. + They are used to set the IRQ level. Don't touch them. 
*/ + /* Must check how to apply these to sio16 boards */ + if (SIOLX_CRTSCTS(tty)) + { + port->MSVR = (MSVR_DTR | (siolx_in(bp, CD186x_MSVR) & MSVR_RTS)); + } + else + { + port->MSVR = (siolx_in(bp, CD186x_MSVR) & MSVR_RTS); + } +#ifdef DEBUG_SIOLX + DEBUGPRINT((KERN_DEBUG "siolx: got MSVR=%02x.\n", port->MSVR)); +#endif + baud = C_BAUD(tty); + + if (baud & CBAUDEX) + { + baud &= ~CBAUDEX; + if((baud < 1) || (baud > 2)) + { + port->tty->termios->c_cflag &= ~CBAUDEX; + } + else + { + baud += 15; + } + } + if (baud == 15) + { + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + { + baud ++; + } + if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + { + baud += 2; + } + } + + + if (!baud_table[baud]) + { + /* Drop DTR & exit */ +#ifdef SIOLX_DEBUG + DEBUGPRINT((KERN_DEBUG "siolx: Dropping DTR... Hmm....\n")); +#endif + if (!SIOLX_CRTSCTS (tty)) + { + port->MSVR &= ~ MSVR_DTR; + siolx_out(bp, CD186x_MSVR, port->MSVR ); + } +#ifdef DEBUG_SIOLX + else + { + DEBUGPRINT((KERN_DEBUG "siolx: Can't drop DTR: no DTR.\n")); + } +#endif + return; + } + else + { + /* Set DTR on */ + if (!SIOLX_CRTSCTS (tty)) + { + port ->MSVR |= MSVR_DTR; + } + } + + /* + * Now we must calculate some speed depended things + */ + + /* Set baud rate for port */ + tmp = port->custom_divisor ; + if(tmp) + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Using custom baud rate divisor %ld. \n" + "This is an untested option, please be carefull.\n", + board_No(bp), + bp->chipnumber, + port_No(port), tmp)); + } + else + { + tmp = (((SIOLX_OSCFREQ + baud_table[baud]/2) / baud_table[baud] + + CD186x_TPC/2) / CD186x_TPC); + } + + if ((tmp < 0x10) && time_before(again, jiffies)) + { + again = jiffies + HZ * 60; + /* Page 48 of version 2.0 of the CL-CD1865 databook */ + if (tmp >= 12) + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d:Baud rate divisor is %ld. \n" + "Performance degradation is possible.\n" + "Read siolx.txt for more info.\n", + board_No(bp), bp->chipnumber, + port_No (port), tmp)); + } else + { + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Baud rate divisor is %ld. \n" + " Warning: overstressing Cirrus chip. " + " This might not work.\n" + " Read siolx.txt for more info.\n", + board_No(bp), bp->chipnumber, port_No (port), tmp)); + } + } + + siolx_out(bp, CD186x_RBPRH, (tmp >> 8) & 0xff); + siolx_out(bp, CD186x_TBPRH, (tmp >> 8) & 0xff); + siolx_out(bp, CD186x_RBPRL, tmp & 0xff); + siolx_out(bp, CD186x_TBPRL, tmp & 0xff); + + if (port->custom_divisor) + { + baud = (SIOLX_OSCFREQ + port->custom_divisor/2) / port->custom_divisor; + baud = ( baud + 5 ) / 10; + } else + { + baud = (baud_table[baud] + 5) / 10; /* Estimated CPS */ + } + + /* Two timer ticks seems enough to wakeup something like SLIP driver */ + tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO; + port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? + SERIAL_XMIT_SIZE - 1 : tmp); + + /* Receiver timeout will be transmission time for 1.5 chars */ + tmp = (SIOLX_TPS + SIOLX_TPS/2 + baud/2) / baud; + tmp = (tmp > 0xff) ? 
0xff : tmp; + siolx_out(bp, CD186x_RTPR, tmp); + + switch (C_CSIZE(tty)) + { + case CS5: + cor1 |= COR1_5BITS; + break; + case CS6: + cor1 |= COR1_6BITS; + break; + case CS7: + cor1 |= COR1_7BITS; + break; + case CS8: + cor1 |= COR1_8BITS; + break; + } + + if (C_CSTOPB(tty)) + { + cor1 |= COR1_2SB; + } + + cor1 |= COR1_IGNORE; + if (C_PARENB(tty)) + { + cor1 |= COR1_NORMPAR; + if (C_PARODD(tty)) + { + cor1 |= COR1_ODDP; + } + if (I_INPCK(tty)) + { + cor1 &= ~COR1_IGNORE; + } + } + /* Set marking of some errors */ + port->mark_mask = RCSR_OE | RCSR_TOUT; + if (I_INPCK(tty)) + { + port->mark_mask |= RCSR_FE | RCSR_PE; + } + if (I_BRKINT(tty) || I_PARMRK(tty)) + { + port->mark_mask |= RCSR_BREAK; + } + if (I_IGNPAR(tty)) + { + port->mark_mask &= ~(RCSR_FE | RCSR_PE); + } + if (I_IGNBRK(tty)) + { + port->mark_mask &= ~RCSR_BREAK; + if (I_IGNPAR(tty)) + { + /* Real raw mode. Ignore all */ + port->mark_mask &= ~RCSR_OE; + } + } + /* Enable Hardware Flow Control */ + if (C_CRTSCTS(tty)) + { +#ifdef SIOLX_BRAIN_DAMAGED_CTS + port->IER |= IER_DSR | IER_CTS; + mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD; + mcor2 |= MCOR2_DSROD | MCOR2_CTSOD; + tty->hw_stopped = !(siolx_in(bp, CD186x_MSVR) & (MSVR_CTS|MSVR_DSR)); +#else + port->COR2 |= COR2_CTSAE; +#endif + } + /* Enable Software Flow Control. FIXME: I'm not sure about this */ + /* Some people reported that it works, but I still doubt it */ + if (I_IXON(tty)) + { + port->COR2 |= COR2_TXIBE; + cor3 |= (COR3_FCT | COR3_SCDE); + if (I_IXANY(tty)) + { + port->COR2 |= COR2_IXM; + } + siolx_out(bp, CD186x_SCHR1, START_CHAR(tty)); + siolx_out(bp, CD186x_SCHR2, STOP_CHAR(tty)); + siolx_out(bp, CD186x_SCHR3, START_CHAR(tty)); + siolx_out(bp, CD186x_SCHR4, STOP_CHAR(tty)); + } + if (!C_CLOCAL(tty)) + { + /* Enable CD check */ + port->IER |= IER_CD; + mcor1 |= MCOR1_CDZD; + mcor2 |= MCOR2_CDOD; + } + + if (C_CREAD(tty)) + { + /* Enable receiver */ + port->IER |= IER_RXD; + } + + /* Set input FIFO size (1-8 bytes) */ + cor3 |= SIOLX_RXFIFO; + /* Setting up CD186x channel registers */ + siolx_out(bp, CD186x_COR1, cor1); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_out(bp, CD186x_COR3, cor3); + /* Make CD186x know about registers change */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3); + /* Setting up modem option registers */ +#ifdef DEBUG_SIOLX + DEBUGPRINT((KERN_ALERT "siolx: Mcor1 = %02x, mcor2 = %02x.\n", mcor1, mcor2)); +#endif + siolx_out(bp, CD186x_MCOR1, mcor1); + siolx_out(bp, CD186x_MCOR2, mcor2); + /* Enable CD186x transmitter & receiver */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_TXEN | CCR_RXEN); + /* Enable interrupts */ + siolx_out(bp, CD186x_IER, port->IER); + /* And finally set the modem lines... 
*/ + siolx_out(bp, CD186x_MSVR, port->MSVR); +} + + +/* Must be called with interrupts enabled */ +static int siolx_setup_port(struct siolx_board *bp, struct siolx_port *port) +{ + unsigned long flags; + + if (port->flags & ASYNC_INITIALIZED) + { + return 0; + } + + if (!port->xmit_buf) + { + /* We may sleep in get_free_page() */ + unsigned long tmp; + + if (!(tmp = get_free_page(GFP_KERNEL))) + { + return -ENOMEM; + } + + if (port->xmit_buf) + { + free_page(tmp); + return -ERESTARTSYS; + } + port->xmit_buf = (unsigned char *) tmp; + } + + save_flags(flags); cli(); + + if (port->tty) + { + clear_bit(TTY_IO_ERROR, &port->tty->flags); + } + + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + siolx_change_speed(bp, port); + port->flags |= ASYNC_INITIALIZED; + + restore_flags(flags); + return 0; +} + + +/* Must be called with interrupts disabled */ +static void siolx_shutdown_port(struct siolx_board *bp, struct siolx_port *port) +{ + struct tty_struct *tty; + + if (!(port->flags & ASYNC_INITIALIZED)) + { + return; + } + +#ifdef SIOLX_REPORT_OVERRUN + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: Total %ld overruns were detected.\n", + board_No(bp), bp->chipnumber, port_No(port), port->overrun)); +#endif +#ifdef SIOLX_REPORT_FIFO + { + int i; + + DEBUGPRINT((KERN_INFO "siolx: board %d: chip %d: port %d: FIFO hits [ ", + board_No(bp), bp->chipnumber, port_No(port))); + for (i = 0; i < 10; i++) + { + DEBUGPRINT(("%ld ", port->hits[i])); + } + DEBUGPRINT(("].\n")); + } +#endif + if (port->xmit_buf) + { + free_page((unsigned long) port->xmit_buf); + port->xmit_buf = NULL; + } + + /* Select port */ + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + + if (!(tty = port->tty) || C_HUPCL(tty)) + { + /* Drop DTR */ + siolx_out(bp, CD186x_MSVDTR, 0); + } + + /* Reset port */ + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SOFTRESET); + /* Disable all interrupts from this port */ + port->IER = 0; + siolx_out(bp, CD186x_IER, port->IER); + + if (tty) + { + set_bit(TTY_IO_ERROR, &tty->flags); + } + port->flags &= ~ASYNC_INITIALIZED; + + /* + * If this is the last opened port on the board + * shutdown whole board + */ + MOD_DEC_USE_COUNT; +} + + +static int block_til_ready(struct tty_struct *tty, struct file * filp, + struct siolx_port *port) +{ + DECLARE_WAITQUEUE(wait, current); + struct siolx_board *bp = port_Board(port); + int retval; + int do_clocal = 0; + int CD; + + /* + * If the device is in the middle of being closed, then block + * until it's done, and then try again. + */ + if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) + { + interruptible_sleep_on(&port->close_wait); + if (port->flags & ASYNC_HUP_NOTIFY) + { + return -EAGAIN; + } + else + { + return -ERESTARTSYS; + } + } + + /* + * If this is a callout device, then just make sure the normal + * device isn't being used. + */ + if (tty->driver.subtype == SIOLX_TYPE_CALLOUT) + { + if (port->flags & ASYNC_NORMAL_ACTIVE) + { + return -EBUSY; + } + if ((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_SESSION_LOCKOUT) && + (port->session != current->session)) + { + return -EBUSY; + } + if ((port->flags & ASYNC_CALLOUT_ACTIVE) && + (port->flags & ASYNC_PGRP_LOCKOUT) && + (port->pgrp != current->pgrp)) + { + return -EBUSY; + } + port->flags |= ASYNC_CALLOUT_ACTIVE; + return 0; + } + + /* + * If non-blocking mode is set, or the port is not enabled, + * then make the check up front and then exit. 
+ */ + if ((filp->f_flags & O_NONBLOCK) || + (tty->flags & (1 << TTY_IO_ERROR))) + { + if (port->flags & ASYNC_CALLOUT_ACTIVE) + { + return -EBUSY; + } + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; + } + + if (port->flags & ASYNC_CALLOUT_ACTIVE) + { + if (port->normal_termios.c_cflag & CLOCAL) + { + do_clocal = 1; + } + } + else + { + if (C_CLOCAL(tty)) + { + do_clocal = 1; + } + } + + /* + * Block waiting for the carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, info->count is dropped by one, so that + * rs_close() knows when to free things. We restore it upon + * exit, either normal or abnormal. + */ + retval = 0; + add_wait_queue(&port->open_wait, &wait); + cli(); + if (!tty_hung_up_p(filp)) + { + port->count--; + } + sti(); + port->blocked_open++; + while (1) + { + cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + CD = siolx_in(bp, CD186x_MSVR) & MSVR_CD; + if (!(port->flags & ASYNC_CALLOUT_ACTIVE)) + { + if (SIOLX_CRTSCTS (tty)) + { + /* Activate RTS */ + port->MSVR |= MSVR_DTR; + siolx_out (bp, CD186x_MSVR, port->MSVR); + } + else + { + /* Activate DTR */ + port->MSVR |= MSVR_DTR; + siolx_out (bp, CD186x_MSVR, port->MSVR); + } + } + sti(); + set_current_state(TASK_INTERRUPTIBLE); + if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) + { + if (port->flags & ASYNC_HUP_NOTIFY) + { + retval = -EAGAIN; + } + else + { + retval = -ERESTARTSYS; + } + break; + } + if (!(port->flags & ASYNC_CALLOUT_ACTIVE) && + !(port->flags & ASYNC_CLOSING) && + (do_clocal || CD)) + { + break; + } + if (signal_pending(current)) + { + retval = -ERESTARTSYS; + break; + } + schedule(); + } + current->state = TASK_RUNNING; + remove_wait_queue(&port->open_wait, &wait); + if (!tty_hung_up_p(filp)) + { + port->count++; + } + port->blocked_open--; + if (retval) + { + return retval; + } + + port->flags |= ASYNC_NORMAL_ACTIVE; + return 0; +} + +static inline struct siolx_port *siolx_portstruc(register int line) +{ + register struct siolx_port *pp; + + line -= siolx_driver.minor_start; + for(pp = siolx_port_root; (pp != NULL) && (line >= 0); --line, pp = pp->next_by_global_list) + { + if(line == 0) + { + return pp; + } + } + return NULL; +} + + +static int siolx_open(struct tty_struct * tty, struct file * filp) +{ + int error; + struct siolx_port * port; + struct siolx_board * bp; + unsigned long flags; + + port = siolx_portstruc(MINOR(tty->device)); + + if(port == NULL) + { + return -ENODEV; + } + bp = port->board; + if(bp == NULL) + { + return -ENODEV; + } + +#ifdef DEBUG_SIOLX + printk (KERN_DEBUG "Board = %d, bp = %p, port = %p, portno = %d.\n", + bp->boardnumber, bp, port, siolx_portstruc(MINOR(tty->device))); +#endif + + if (siolx_paranoia_check(port, tty->device, "siolx_open")) + return -ENODEV; + + MOD_INC_USE_COUNT; + + port->count++; + tty->driver_data = port; + port->tty = tty; + + if ((error = siolx_setup_port(bp, port))) + return error; + + if ((error = block_til_ready(tty, filp, port))) + return error; + + if ((port->count == 1) && (port->flags & ASYNC_SPLIT_TERMIOS)) { + if (tty->driver.subtype == SIOLX_TYPE_NORMAL) + *tty->termios = port->normal_termios; + else + *tty->termios = port->callout_termios; + save_flags(flags); cli(); + siolx_change_speed(bp, port); + restore_flags(flags); + } + + port->session = current->session; + port->pgrp = current->pgrp; + return 0; +} + + +static void siolx_close(struct tty_struct * tty, struct file * filp) +{ + struct siolx_port *port = (struct siolx_port *) tty->driver_data; + 
struct siolx_board *bp; + unsigned long flags; + unsigned long timeout; + + if (!port || siolx_paranoia_check(port, tty->device, "close")) + return; + + save_flags(flags); cli(); + if (tty_hung_up_p(filp)) { + restore_flags(flags); + return; + } + + bp = port_Board(port); + if ((tty->count == 1) && (port->count != 1)) { + printk(KERN_ERR "sx%d: siolx_close: bad port count;" + " tty->count is 1, port count is %d\n", + board_No(bp), port->count); + port->count = 1; + } + if (--port->count < 0) { + printk(KERN_ERR "sx%d: siolx_close: bad port count for tty%d: %d\n", + board_No(bp), port_No(port), port->count); + port->count = 0; + } + if (port->count) { + restore_flags(flags); + return; + } + port->flags |= ASYNC_CLOSING; + /* + * Save the termios structure, since this port may have + * separate termios for callout and dialin. + */ + if (port->flags & ASYNC_NORMAL_ACTIVE) + port->normal_termios = *tty->termios; + if (port->flags & ASYNC_CALLOUT_ACTIVE) + port->callout_termios = *tty->termios; + /* + * Now we wait for the transmit buffer to clear; and we notify + * the line discipline to only process XON/XOFF characters. + */ + tty->closing = 1; + if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) + tty_wait_until_sent(tty, port->closing_wait); + /* + * At this point we stop accepting input. To do this, we + * disable the receive line status interrupts, and tell the + * interrupt driver to stop checking the data ready bit in the + * line status register. + */ + port->IER &= ~IER_RXD; + if (port->flags & ASYNC_INITIALIZED) { + port->IER &= ~IER_TXRDY; + port->IER |= IER_TXEMPTY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + /* + * Before we drop DTR, make sure the UART transmitter + * has completely drained; this is especially + * important if there is a transmit FIFO! 
+ */ + timeout = jiffies+HZ; + while(port->IER & IER_TXEMPTY) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(port->timeout); + if (time_after(jiffies, timeout)) { + printk (KERN_INFO "siolx: Timeout waiting for close\n"); + break; + } + } + + } + siolx_shutdown_port(bp, port); + if (tty->driver.flush_buffer) + tty->driver.flush_buffer(tty); + if (tty->ldisc.flush_buffer) + tty->ldisc.flush_buffer(tty); + tty->closing = 0; + port->event = 0; + port->tty = 0; + if (port->blocked_open) { + if (port->close_delay) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(port->close_delay); + } + wake_up_interruptible(&port->open_wait); + } + port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE| + ASYNC_CLOSING); + wake_up_interruptible(&port->close_wait); + restore_flags(flags); +} + + +static int siolx_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + int c, total = 0; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_write")) + return 0; + + bp = port_Board(port); + + if (!tty || !port->xmit_buf || !tmp_buf) + return 0; + + save_flags(flags); + if (from_user) { + down(&tmp_buf_sem); + while (1) { + c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + if (c <= 0) + break; + + c -= copy_from_user(tmp_buf, buf, c); + if (!c) { + if (!total) + total = -EFAULT; + break; + } + + cli(); + c = MIN(c, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + memcpy(port->xmit_buf + port->xmit_head, tmp_buf, c); + port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1); + port->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + total += c; + } + up(&tmp_buf_sem); + } else { + while (1) { + cli(); + c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, + SERIAL_XMIT_SIZE - port->xmit_head)); + if (c <= 0) { + restore_flags(flags); + break; + } + memcpy(port->xmit_buf + port->xmit_head, buf, c); + port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1); + port->xmit_cnt += c; + restore_flags(flags); + + buf += c; + count -= c; + total += c; + } + } + + cli(); + if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped && + !(port->IER & IER_TXRDY)) { + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + } + restore_flags(flags); + return total; +} + + +static void siolx_put_char(struct tty_struct * tty, unsigned char ch) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_put_char")) + return; + + if (!tty || !port->xmit_buf) + return; + + save_flags(flags); cli(); + + if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { + restore_flags(flags); + return; + } + + port->xmit_buf[port->xmit_head++] = ch; + port->xmit_head &= SERIAL_XMIT_SIZE - 1; + port->xmit_cnt++; + restore_flags(flags); +} + + +static void siolx_flush_chars(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_flush_chars")) + return; + + if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || + !port->xmit_buf) + return; + + save_flags(flags); cli(); + port->IER |= IER_TXRDY; + siolx_out(port_Board(port), CD186x_CAR, port_No_by_chip(port)); + siolx_out(port_Board(port), 
CD186x_IER, port->IER); + restore_flags(flags); +} + + +static int siolx_write_room(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + int ret; + + if (siolx_paranoia_check(port, tty->device, "siolx_write_room")) + return 0; + + ret = SERIAL_XMIT_SIZE - port->xmit_cnt - 1; + if (ret < 0) + ret = 0; + return ret; +} + + +static int siolx_chars_in_buffer(struct tty_struct *tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + + if (siolx_paranoia_check(port, tty->device, "siolx_chars_in_buffer")) + return 0; + + return port->xmit_cnt; +} + + +static void siolx_flush_buffer(struct tty_struct *tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_flush_buffer")) + return; + + save_flags(flags); cli(); + port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; + restore_flags(flags); + + wake_up_interruptible(&tty->write_wait); + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); +} + + +static int siolx_get_modem_info(struct siolx_port * port, unsigned int *value) +{ + struct siolx_board * bp; + unsigned char status; + unsigned int result; + unsigned long flags; + + bp = port_Board(port); + save_flags(flags); cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + status = siolx_in(bp, CD186x_MSVR); + restore_flags(flags); +#ifdef DEBUG_SIOLX + printk (KERN_DEBUG "Got msvr[%d] = %02x, car = %d.\n", + port_No(port), status, siolx_in (bp, CD186x_CAR)); + printk (KERN_DEBUG "siolx_port = %p, port = %p\n", siolx_port, port); +#endif + if (SIOLX_CRTSCTS(port->tty)) { + result = /* (status & MSVR_RTS) ? */ TIOCM_DTR /* : 0) */ + | ((status & MSVR_DTR) ? TIOCM_RTS : 0) + | ((status & MSVR_CD) ? TIOCM_CAR : 0) + |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ + | ((status & MSVR_CTS) ? TIOCM_CTS : 0); + } else { + result = /* (status & MSVR_RTS) ? */ TIOCM_RTS /* : 0) */ + | ((status & MSVR_DTR) ? TIOCM_DTR : 0) + | ((status & MSVR_CD) ? TIOCM_CAR : 0) + |/* ((status & MSVR_DSR) ? */ TIOCM_DSR /* : 0) */ + | ((status & MSVR_CTS) ? TIOCM_CTS : 0); + } + put_user(result,(unsigned int *) value); + return 0; +} + + +static int siolx_set_modem_info(struct siolx_port * port, unsigned int cmd, + unsigned int *value) +{ + int error; + unsigned int arg; + unsigned long flags; + struct siolx_board *bp = port_Board(port); + + error = verify_area(VERIFY_READ, value, sizeof(int)); + if (error) + return error; + + get_user(arg, (unsigned long *) value); + switch (cmd) { + case TIOCMBIS: + /* if (arg & TIOCM_RTS) + port->MSVR |= MSVR_RTS; */ + /* if (arg & TIOCM_DTR) + port->MSVR |= MSVR_DTR; */ + + if (SIOLX_CRTSCTS(port->tty)) { + if (arg & TIOCM_RTS) + port->MSVR |= MSVR_DTR; + } else { + if (arg & TIOCM_DTR) + port->MSVR |= MSVR_DTR; + } + break; + case TIOCMBIC: + /* if (arg & TIOCM_RTS) + port->MSVR &= ~MSVR_RTS; */ + /* if (arg & TIOCM_DTR) + port->MSVR &= ~MSVR_DTR; */ + if (SIOLX_CRTSCTS(port->tty)) { + if (arg & TIOCM_RTS) + port->MSVR &= ~MSVR_DTR; + } else { + if (arg & TIOCM_DTR) + port->MSVR &= ~MSVR_DTR; + } + break; + case TIOCMSET: + /* port->MSVR = (arg & TIOCM_RTS) ? (port->MSVR | MSVR_RTS) : + (port->MSVR & ~MSVR_RTS); */ + /* port->MSVR = (arg & TIOCM_DTR) ? (port->MSVR | MSVR_DTR) : + (port->MSVR & ~MSVR_DTR); */ + if (SIOLX_CRTSCTS(port->tty)) { + port->MSVR = (arg & TIOCM_RTS) ? 
+ (port->MSVR | MSVR_DTR) : + (port->MSVR & ~MSVR_DTR); + } else { + port->MSVR = (arg & TIOCM_DTR) ? + (port->MSVR | MSVR_DTR): + (port->MSVR & ~MSVR_DTR); + } + break; + default: + return -EINVAL; + } + save_flags(flags); cli(); + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); + return 0; +} + + +static inline void siolx_send_break(struct siolx_port * port, unsigned long length) +{ + struct siolx_board *bp = port_Board(port); + unsigned long flags; + + save_flags(flags); cli(); + port->break_length = SIOLX_TPS / HZ * length; + port->COR2 |= COR2_ETC; + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_COR2, port->COR2); + siolx_out(bp, CD186x_IER, port->IER); + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_CORCHG2); + siolx_wait_CCR(bp); + restore_flags(flags); +} + + +static inline int siolx_set_serial_info(struct siolx_port * port, + struct serial_struct * newinfo) +{ + struct serial_struct tmp; + struct siolx_board *bp = port_Board(port); + int change_speed; + unsigned long flags; + int error; + + error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp)); + if (error) + return error; + + if (copy_from_user(&tmp, newinfo, sizeof(tmp))) + return -EFAULT; + +#if 0 + if ((tmp.irq != bp->irq) || + (tmp.port != bp->base) || + (tmp.type != PORT_CIRRUS) || + (tmp.baud_base != (SIOLX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC) || + (tmp.custom_divisor != 0) || + (tmp.xmit_fifo_size != CD186x_NFIFO) || + (tmp.flags & ~SIOLX_LEGAL_FLAGS)) + return -EINVAL; +#endif + + change_speed = ((port->flags & ASYNC_SPD_MASK) != + (tmp.flags & ASYNC_SPD_MASK)); + change_speed |= (tmp.custom_divisor != port->custom_divisor); + + if (!capable(CAP_SYS_ADMIN)) { + if ((tmp.close_delay != port->close_delay) || + (tmp.closing_wait != port->closing_wait) || + ((tmp.flags & ~ASYNC_USR_MASK) != + (port->flags & ~ASYNC_USR_MASK))) + return -EPERM; + port->flags = ((port->flags & ~ASYNC_USR_MASK) | + (tmp.flags & ASYNC_USR_MASK)); + port->custom_divisor = tmp.custom_divisor; + } else { + port->flags = ((port->flags & ~ASYNC_FLAGS) | + (tmp.flags & ASYNC_FLAGS)); + port->close_delay = tmp.close_delay; + port->closing_wait = tmp.closing_wait; + port->custom_divisor = tmp.custom_divisor; + } + if (change_speed) { + save_flags(flags); cli(); + siolx_change_speed(bp, port); + restore_flags(flags); + } + return 0; +} + + +static inline int siolx_get_serial_info(struct siolx_port * port, + struct serial_struct * retinfo) +{ + struct serial_struct tmp; + struct siolx_board *bp = port_Board(port); + int error; + + error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp)); + if (error) + return error; + + memset(&tmp, 0, sizeof(tmp)); + tmp.type = PORT_CIRRUS; + tmp.line = port->driverport; + tmp.port = bp->base; + tmp.irq = bp->irq; + tmp.flags = port->flags; + tmp.baud_base = (SIOLX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC; + tmp.close_delay = port->close_delay * HZ/100; + tmp.closing_wait = port->closing_wait * HZ/100; + tmp.custom_divisor = port->custom_divisor; + tmp.xmit_fifo_size = CD186x_NFIFO; + if (copy_to_user(retinfo, &tmp, sizeof(tmp))) + return -EFAULT; + return 0; +} + + +static int siolx_ioctl(struct tty_struct * tty, struct file * filp, + unsigned int cmd, unsigned long arg) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + int error; + int retval; + + if (siolx_paranoia_check(port, tty->device, "siolx_ioctl")) + return -ENODEV; + + switch (cmd) { + case TCSBRK: /* 
SVID version: non-zero arg --> no break */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + if (!arg) + siolx_send_break(port, HZ/4); /* 1/4 second */ + return 0; + case TCSBRKP: /* support for POSIX tcsendbreak() */ + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + siolx_send_break(port, arg ? arg*(HZ/10) : HZ/4); + return 0; + case TIOCGSOFTCAR: + error = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long)); + if (error) + return error; + put_user(C_CLOCAL(tty) ? 1 : 0, + (unsigned long *) arg); + return 0; + case TIOCSSOFTCAR: + get_user(arg, (unsigned long *) arg); + tty->termios->c_cflag = + ((tty->termios->c_cflag & ~CLOCAL) | + (arg ? CLOCAL : 0)); + return 0; + case TIOCMGET: + error = verify_area(VERIFY_WRITE, (void *) arg, + sizeof(unsigned int)); + if (error) + return error; + return siolx_get_modem_info(port, (unsigned int *) arg); + case TIOCMBIS: + case TIOCMBIC: + case TIOCMSET: + return siolx_set_modem_info(port, cmd, (unsigned int *) arg); + case TIOCGSERIAL: + return siolx_get_serial_info(port, (struct serial_struct *) arg); + case TIOCSSERIAL: + return siolx_set_serial_info(port, (struct serial_struct *) arg); + default: + return -ENOIOCTLCMD; + } + return 0; +} + + +static void siolx_throttle(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_throttle")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + + /* Use DTR instead of RTS ! */ + if (SIOLX_CRTSCTS (tty)) + { + port->MSVR &= ~MSVR_DTR; + } + else + { + /* Auch!!! I think the system shouldn't call this then. */ + /* Or maybe we're supposed (allowed?) to do our side of hw + handshake anyway, even when hardware handshake is off. + When you see this in your logs, please report.... */ + printk (KERN_ERR "sx%d: Need to throttle, but can't (hardware hs is off)\n", + port_No (port)); + } + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + if (I_IXOFF(tty)) + { + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SSCH2); + siolx_wait_CCR(bp); + } + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); +} + + +static void siolx_unthrottle(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_unthrottle")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + /* XXXX Use DTR INSTEAD???? */ + if (SIOLX_CRTSCTS(tty)) { + port->MSVR |= MSVR_DTR; + } /* Else clause: see remark in "siolx_throttle"... 
*/ + + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + if (I_IXOFF(tty)) { + siolx_wait_CCR(bp); + siolx_out(bp, CD186x_CCR, CCR_SSCH1); + siolx_wait_CCR(bp); + } + siolx_out(bp, CD186x_MSVR, port->MSVR); + restore_flags(flags); +} + + +static void siolx_stop(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_stop")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + port->IER &= ~IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + restore_flags(flags); +} + + +static void siolx_start(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_start")) + return; + + bp = port_Board(port); + + save_flags(flags); cli(); + if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { + port->IER |= IER_TXRDY; + siolx_out(bp, CD186x_CAR, port_No_by_chip(port)); + siolx_out(bp, CD186x_IER, port->IER); + } + restore_flags(flags); +} + + +/* + * This routine is called from the scheduler tqueue when the interrupt + * routine has signalled that a hangup has occurred. The path of + * hangup processing is: + * + * serial interrupt routine -> (scheduler tqueue) -> + * do_siolx_hangup() -> tty->hangup() -> siolx_hangup() + * + */ +static void do_siolx_hangup(void *private_) +{ + struct siolx_port *port = (struct siolx_port *) private_; + struct tty_struct *tty; + + tty = port->tty; + if (tty) + { + tty_hangup(tty); /* FIXME: module removal race here */ + } + MOD_DEC_USE_COUNT; +} + + +static void siolx_hangup(struct tty_struct * tty) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + struct siolx_board *bp; + + if (siolx_paranoia_check(port, tty->device, "siolx_hangup")) + return; + + bp = port_Board(port); + + siolx_shutdown_port(bp, port); + port->event = 0; + port->count = 0; + port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE); + port->tty = 0; + wake_up_interruptible(&port->open_wait); +} + + +static void siolx_set_termios(struct tty_struct * tty, struct termios * old_termios) +{ + struct siolx_port *port = (struct siolx_port *)tty->driver_data; + unsigned long flags; + + if (siolx_paranoia_check(port, tty->device, "siolx_set_termios")) + return; + + if (tty->termios->c_cflag == old_termios->c_cflag && + tty->termios->c_iflag == old_termios->c_iflag) + return; + + save_flags(flags); cli(); + siolx_change_speed(port_Board(port), port); + restore_flags(flags); + + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + siolx_start(tty); + } +} + + +static void do_siolx_bh(void) +{ + run_task_queue(&tq_siolx); +} + + +static void do_softint(void *private_) +{ + struct siolx_port *port = (struct siolx_port *) private_; + struct tty_struct *tty; + + if(!(tty = port->tty)) + return; + + if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event)) { + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); + wake_up_interruptible(&tty->write_wait); + } +} + +static int siolx_finish_init_drivers(void) +{ + register struct siolx_board *bp; + register unsigned int count; + unsigned int maxport; + struct siolx_port *port; + struct siolx_port *lastport; + int error; + + bp = siolx_board_root; + + while(bp) + { + 
if(bp->chipnumber == 0) + { + maxport = SIOLX_NPORT; + } + else if((bp->boardtype == BD_16000C) && bp->reario) /* must be second chip of 16000C */ + { + maxport = SIOLX_NPORT/2; + } + else + { + maxport = SIOLX_NPORT; /* must be second chip of 16000P */ + } + + port = NULL; /* probably unnecessary */ + lastport = NULL; + for(count = 0; count < maxport; ++count) + { + port = (struct siolx_port*)kmalloc(sizeof(struct siolx_port), GFP_KERNEL); + if(port == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create port structure on board %p.\n", bp); + break; /* no memory available */ + } + memset(port, 0, sizeof(struct siolx_port)); + + port->callout_termios = siolx_callout_driver.init_termios; + port->normal_termios = siolx_driver.init_termios; + port->magic = SIOLX_MAGIC; + port->tqueue.routine = do_softint; + port->tqueue.data = port; + port->tqueue_hangup.routine = do_siolx_hangup; + port->tqueue_hangup.data = port; + port->close_delay = 50 * HZ/100; + port->closing_wait = 3000 * HZ/100; + init_waitqueue_head(&port->open_wait); + init_waitqueue_head(&port->close_wait); + + port->board = bp; + port->driverport = NumSiolxPorts; + port->boardport = (count + (port->board->chipnumber*SIOLX_NPORT)); /* 0-16 */ + + if(count == 0) + { + bp->portlist = port; + } + else if(lastport) /* if count != 0 lastport should be non-null */ + { + lastport->next_by_board = port; + } + if(siolx_port_root == NULL) + { + siolx_port_root = port; + siolx_port_last = port; + } + else + { + siolx_port_last->next_by_global_list = port; + siolx_port_last = port; + } + lastport = port; + ++NumSiolxPorts; + } + bp = bp->next_by_global_list; + } + + siolx_driver.num = NumSiolxPorts; + + siolx_table = (struct tty_struct **) kmalloc(NumSiolxPorts*sizeof(struct tty_struct *), GFP_KERNEL); + if(siolx_table == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_table.\n"); + return 1; + } + memset(siolx_table, 0, NumSiolxPorts*sizeof(struct tty_struct *)); + + siolx_termios = (struct termios **) kmalloc(NumSiolxPorts*sizeof(struct termios *), GFP_KERNEL); + if(siolx_termios == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_termios.\n"); + return 1; + } + memset(siolx_termios, 0, NumSiolxPorts*sizeof(struct termios *)); + + siolx_termios_locked = (struct termios **) kmalloc(NumSiolxPorts*sizeof(struct termios *), GFP_KERNEL); + if(siolx_termios_locked == NULL) + { + printk(KERN_ALERT "siolx: Could not allocate memory for siolx_termios_locked.\n"); + return 1; + } + memset(siolx_termios_locked, 0, NumSiolxPorts*sizeof(struct termios *)); + + siolx_driver.table = siolx_table; /* will be changed */ + siolx_driver.termios = siolx_termios; /* will be changed */ + siolx_driver.termios_locked = siolx_termios_locked; /* will be changed */ + + if ((error = tty_register_driver(&siolx_driver))) + { + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = 0; + } + printk(KERN_ERR "siolx: Couldn't register Aurora Asynchronous Adapter driver, error = %d\n", + error); + return 1; + } + if ((error = tty_register_driver(&siolx_callout_driver))) + { + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = NULL; + } + tty_unregister_driver(&siolx_driver); + printk(KERN_ERR "siolx: Couldn't register Aurora Asynchronous Adapter callout driver, error = %d\n", + error); + return 1; + } + siolx_driver_registered = 1; + siolx_callout_driver_registered = 1; + return 0; /* success */ +} + +static int siolx_init_drivers(void) +{ + if (!(tmp_buf = (unsigned char *) 
get_free_page(GFP_KERNEL))) + { + printk(KERN_ERR "siolx: Couldn't get free page.\n"); + return 1; + } + init_bh(siolx_bhindex, do_siolx_bh); + memset(&siolx_driver, 0, sizeof(siolx_driver)); + siolx_driver.magic = TTY_DRIVER_MAGIC; + + siolx_driver.driver_name = "aurasiolx"; + siolx_driver.name = "ttyS"; + siolx_driver.major = siolx_major; +#ifdef MODULE + siolx_driver.minor_start = siolx_minorstart; /* changed from command line */ +#else + siolx_driver.minor_start = GetMinorStart(); +#endif + siolx_driver.num = 0; /* will be changed */ + + siolx_driver.type = TTY_DRIVER_TYPE_SERIAL; + siolx_driver.subtype = SIOLX_TYPE_NORMAL; + siolx_driver.init_termios = tty_std_termios; + siolx_driver.init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + siolx_driver.flags = TTY_DRIVER_REAL_RAW; + siolx_driver.refcount = &siolx_refcount; + + siolx_driver.table = siolx_table; /* will be changed */ + siolx_driver.termios = siolx_termios; /* will be changed */ + siolx_driver.termios_locked = siolx_termios_locked; /* will be changed */ + + siolx_driver.open = siolx_open; + siolx_driver.close = siolx_close; + siolx_driver.write = siolx_write; + siolx_driver.put_char = siolx_put_char; + siolx_driver.flush_chars = siolx_flush_chars; + siolx_driver.write_room = siolx_write_room; + siolx_driver.chars_in_buffer = siolx_chars_in_buffer; + siolx_driver.flush_buffer = siolx_flush_buffer; + siolx_driver.ioctl = siolx_ioctl; + siolx_driver.throttle = siolx_throttle; + siolx_driver.unthrottle = siolx_unthrottle; + siolx_driver.set_termios = siolx_set_termios; + siolx_driver.stop = siolx_stop; + siolx_driver.start = siolx_start; + siolx_driver.hangup = siolx_hangup; + + siolx_callout_driver = siolx_driver; + siolx_callout_driver.name = "cuw"; + siolx_callout_driver.major = (siolx_major+1); + siolx_callout_driver.subtype = SIOLX_TYPE_CALLOUT; + + siolx_driver.read_proc = siolx_read_proc; + return 0; +} + + +static void siolx_release_drivers(void) +{ + unsigned int intr_val; + struct siolx_board *bp; + + if(tmp_buf) + { + free_page((unsigned long)tmp_buf); + tmp_buf = NULL; + } + if(siolx_driver_registered) + { + tty_unregister_driver(&siolx_driver); + siolx_driver_registered = 0; + } + if(siolx_callout_driver_registered) + { + tty_unregister_driver(&siolx_callout_driver); + siolx_callout_driver_registered = 0; + } + /* unallocate and turn off ints */ + for(intr_val = 0; intr_val < SIOLX_NUMINTS; ++intr_val) + { + if(SiolxIrqRoot[intr_val] != NULL) + { + for(bp = SiolxIrqRoot[intr_val]; bp != NULL; + bp = bp->next_by_interrupt) + { + SiolxShutdownBoard(bp); /* turn off int; release the plx vaddr space */ + } + free_irq(intr_val, &SiolxIrqRoot[intr_val]); + } + } + +} + +static void siolx_release_memory(void) +{ + register struct siolx_board *bp; + register struct siolx_port *port; + + while(siolx_board_root) + { + bp = siolx_board_root; + siolx_board_root = bp->next_by_global_list; + siolx_release_io_range(bp); /* releases the chip vaddr */ + kfree(bp); + } + while(siolx_port_root) + { + port = siolx_port_root; + if(port->xmit_buf) + { /* should have been done when port shutdown */ + free_page((unsigned long) port->xmit_buf); + port->xmit_buf = NULL; + } + siolx_port_root = port->next_by_global_list; + kfree(port); + } + if(siolx_table) + { + kfree(siolx_table); + siolx_table = NULL; + } + if(siolx_termios) + { + kfree(siolx_termios); + siolx_termios = NULL; + } + if(siolx_termios_locked) + { + kfree(siolx_termios_locked); + siolx_termios_locked = NULL; + } + +#ifdef SIOLX_TIMER + del_timer 
(&missed_irq_timer); +#endif +} + + +static void siolx_cleanup(void) +{ + siolx_release_drivers(); + siolx_release_memory(); +} + +/* + * This routine must be called by kernel at boot time + */ + +static int __init siolx_init(void) +{ + unsigned char bus; + unsigned char devfn; + struct siolx_board *bp; + struct siolx_board *bp2; + unsigned int boardcount; + struct pci_dev *pdev = NULL; + unsigned int ecntl; + unsigned int intr_val; + + printk(KERN_ALERT "aurora interea miseris mortalibus almam extulerat lucem\n"); + printk(KERN_ALERT " referens opera atque labores\n"); + printk(KERN_INFO "siolx: Siolx Aurora Asynchronous Adapter driver v" VERSION ", (c) Telford Tools, Inc.\n"); +#ifdef CONFIG_SIOLX_RTSCTS + printk (KERN_INFO "siolx: DTR/RTS pin is always RTS.\n"); +#else + printk (KERN_INFO "siolx: DTR/RTS pin is RTS when CRTSCTS is on.\n"); +#endif + memset(SiolxIrqRoot, 0, sizeof(SiolxIrqRoot)); + tmp_buf = NULL; + siolx_board_root = NULL; /* clear out the global pointers */ + siolx_board_last = NULL; + siolx_port_root = NULL; + siolx_port_last = NULL; + NumSiolxPorts = 0; + siolx_table = NULL; /* make dynamic */ + siolx_termios = NULL; + siolx_termios_locked = NULL; + siolx_driver_registered = 0; + siolx_callout_driver_registered = 0; + + boardcount = 0; + + if (siolx_init_drivers()) + { + printk(KERN_INFO "siolx: Could not initialize drivers.\n"); + return -EIO; + } + + if (!pci_present()) + { + printk(KERN_INFO "siolx: Could not find PCI bus.\n"); + return -EIO; /* no PCI bus no Aurora cards */ + } + + while(1) + { + pdev = pci_find_device (siolx_vendor_id, siolx_device_id, pdev); + if (!pdev) + { + break; /* found no devices */ + } + + DEBUGPRINT((KERN_ALERT "%s\n", pdev->name)); + DEBUGPRINT((KERN_ALERT "subsystem vendor is %x.\n", + pdev->subsystem_vendor)); + DEBUGPRINT((KERN_ALERT "subsystem device is %x.\n", + pdev->subsystem_device)); + DEBUGPRINT((KERN_ALERT + "BAR0 = %lx\nBAR1 = %lx\nBAR2 = %lx\nBAR3 = %lx\nBAR4 = %lx\nBAR5 = %lx\n", + pci_resource_start(pdev, 0), + pci_resource_start(pdev, 1), + pci_resource_start(pdev, 2), + pci_resource_start(pdev, 3), + pci_resource_start(pdev, 4), + pci_resource_start(pdev, 5))); + DEBUGPRINT((KERN_ALERT + "LAS0 = %lx\nLAS1 = %lx\nLAS2 = %lx\nLAS3 = %lx\nLAS4 = %lx\nLAS5 = %lx\n", + pci_resource_len(pdev, 0), + pci_resource_len(pdev, 1), + pci_resource_len(pdev, 2), + pci_resource_len(pdev, 3), + pci_resource_len(pdev, 4), + pci_resource_len(pdev, 5))); + + if(pdev->subsystem_vendor == siolx_subsystem_vendor) + { + if(pdev->subsystem_device == siolx_subsystem_pci_device) + { + bp = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp == NULL) + { + printk(KERN_ALERT "siolx: Failed to create board structure on board %d.\n", boardcount); + break; /* no memory available */ + } + memset(bp, 0, sizeof(struct siolx_board)); + bp->boardtype = BD_8000P; + } + else if(pdev->subsystem_device == siolx_subsystem_cpci_device) + { + bp = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create board structure on board%p.\n", bp); + break; /* no memory available */ + } + memset(bp, 0, sizeof(struct siolx_board)); + bp->boardtype = BD_8000C; + } + else + { + continue; + } + } + else + { + continue; + } + + DEBUGPRINT((KERN_ALERT "siolx: interrupt is %i.\n", pdev->irq)); + bus = pdev->bus->number; + devfn = pdev->devfn; + DEBUGPRINT((KERN_ALERT "siolx: bus is %x, slot is %x.\n", bus, PCI_SLOT(devfn))); + + if (pci_enable_device(pdev)) + { + 
kfree(bp); + continue; /* enable failed */ + } + pci_set_master(pdev); + + bp->irq = pdev->irq; + SiolxResetBoard(bp, pdev); /* make sure the board is in a known state */ + if(bp->plx_vaddr == 0) + { + printk(KERN_ALERT "siolx: failed to remap plx address space.\n"); + kfree(bp); + continue; + } + bp->vaddr = (unsigned long) ioremap_nocache(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if(bp->vaddr) + { + bp->base = (bp->vaddr + MPASYNC_CHIP1_OFFSET); + bp->boardnumber = boardcount; + if (siolx_probe(bp)) /* failure is nonzero */ + { + iounmap((void*)bp->plx_vaddr); + bp->plx_vaddr = 0; + iounmap((void*)bp->vaddr); + bp->vaddr = 0; + kfree(bp); /* something wrong with board */ + continue; + } + intr_val = bp->irq; + if((intr_val < 0) || (intr_val >= SIOLX_NUMINTS)) + { + printk(KERN_ALERT "siolx: bad interrupt %i board %p.\n", intr_val, bp); + iounmap((void*)bp->plx_vaddr); /* but plx space was remapped */ + bp->plx_vaddr = 0; + iounmap((void*)bp->vaddr); /* release chip space */ + bp->vaddr = 0; + kfree(bp); /* release the board structure */ + continue; + } + bp->next_by_interrupt = SiolxIrqRoot[intr_val]; + SiolxIrqRoot[intr_val] = bp; + if(siolx_board_last == NULL) + { + siolx_board_root = bp; + siolx_board_last = bp; + } + else + { + siolx_board_last->next_by_global_list = bp; + siolx_board_last = bp; + } + bp->chipnumber = 0; + bp->intstatus = bp->plx_vaddr + PLX_ICSR; + bp->next_by_chain = bp; /* one item chain */ + ecntl = readl(bp->plx_vaddr + PLX_ECNTL); + boardcount++; /* added a board */ + if(pci_resource_len(pdev, 2) > MPASYNC_CHIP2_OFFSET) + { + ++(bp->boardtype); /* works because how types are defined 8000X --> 16000X*/ + if(bp->boardtype == BD_16000C) + { + if((ecntl & PLX_ECNTLUSERI) == 0) + { + bp->reario = 1; + } + } + bp2 = (struct siolx_board*)kmalloc(sizeof(struct siolx_board), GFP_KERNEL); + if(bp2 == NULL) + { + printk(KERN_ALERT + "siolx: Failed to create second board structure on board %p.\n", bp); + /* fall through because must turn on ints for other chip */ + } + else + { + memset(bp2, 0, sizeof(struct siolx_board)); /* unnecessary */ + *bp2 = *bp; /* note that all guys in chain point to same next_by interrupt */ + bp->next_by_chain = bp2; /* circular list */ + bp2->next_by_chain = bp;/* now chain two elements*/ + ++(bp2->chipnumber); /* chipnumber 1 */ + bp2->base = (bp2->vaddr + MPASYNC_CHIP2_OFFSET); + if(siolx_probe(bp2)) + { + printk(KERN_ALERT "siolx: Failed to probe second board structure on board %p.\n", bp); + kfree(bp2); + /* fall through because must turn on ints for other chip */ + /* don't release pci memory remap -- still works for other chip */ + } + else if(siolx_board_last == NULL) + { + siolx_board_root = bp2; /* this case should not occur */ + siolx_board_last = bp2; + } + else + { + siolx_board_last->next_by_global_list = bp2; + siolx_board_last = bp2; + } + /* don't increment boardnumber */ + } + } + } + else /* could not remap the cd18xx space */ + { + iounmap((void*)bp->plx_vaddr); /* but plx space was remapped */ + bp->plx_vaddr = 0; + kfree(bp); + } + } + if (boardcount == 0) + { + printk(KERN_INFO "siolx: No Aurora Asynchronous Adapter boards detected.\n"); + siolx_cleanup(); /* don't need any allocated memory */ + return -EIO; + } + if (siolx_finish_init_drivers()) + { + printk(KERN_INFO "siolx: Could not finish driver initialization.\n"); + siolx_cleanup(); + return -EIO; + } + + for(intr_val = 0; intr_val < SIOLX_NUMINTS; ++intr_val) /* trying to install as few int handlers as possible */ + { /* one for each group of 
boards (actually chips) on a given irq */ + if(SiolxIrqRoot[intr_val] != NULL) + { + if (request_irq(intr_val, siolx_interrupt, SA_SHIRQ, "siolx Aurora Asynchronous Adapter", + &SiolxIrqRoot[intr_val]) == 0) + /* interrupts on perboard basis + * cycle through chips and then + * ports */ + /* NOTE PLX INTS ARE OFF -- so turn them on */ + { + for(bp = SiolxIrqRoot[intr_val]; bp != NULL; bp = bp->next_by_interrupt) + { + writel(PLX_ICSRLCLINTPCI | PLX_ICSRPCIINTS, bp->plx_vaddr + PLX_ICSR); /* enable interrupts */ + } + } + else + { + printk(KERN_ALERT "siolx: Unable to get interrupt, board set up not complete %i.\n", intr_val); + /* no interrupts but on all lists */ + } + } + } + return 0; +} + +module_init(siolx_init); +module_exit(siolx_cleanup); +MODULE_DESCRIPTION("multiport Aurora asynchronous driver"); +MODULE_AUTHOR("Joachim Martillo "); +MODULE_LICENSE("GPL"); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/cd1865/cdsiolx.h linux.21pre4-ac2/drivers/char/cd1865/cdsiolx.h --- linux.21pre4/drivers/char/cd1865/cdsiolx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/cd1865/cdsiolx.h 2003-02-01 16:51:26.000000000 +0000 @@ -0,0 +1,136 @@ +/* -*- linux-c -*- */ +/* + * This file was modified from + * linux/drivers/char/siolx_io8.h -- + * Siolx IO8+ multiport serial driver. + * + * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl) + * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) + * Modifications (C) 2002 Telford Tools, Inc. (martillo@telfordtools.com) + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * */ + +#ifndef __LINUX_SIOLX_H +#define __LINUX_SIOLX_H + +#include + +#ifdef __KERNEL__ + +#define SIOLX_NBOARD 8 + +/* eight ports per chip. */ +#define SIOLX_NPORT 8 +#define SIOLX_PORT(line) ((line) & (SIOLX_NPORT - 1)) + +#define MHz *1000000 /* I'm ashamed of myself. */ + +/* On-board oscillator frequency */ +#define SIOLX_OSCFREQ (33 MHz) +/* oregano is in /1 which mace 66Mhz is in /2 mode */ + +/* Ticks per sec. Used for setting receiver timeout and break length */ +#define SIOLX_TPS 4000 + +/* Yeah, after heavy testing I decided it must be 6. + * Sure, You can change it if needed. + */ +#define SIOLX_RXFIFO 6 /* Max. receiver FIFO size (1-8) */ + +#define SIOLX_MAGIC 0x0907 + +#define SIOLX_CCR_TIMEOUT 10000 /* CCR timeout. 
You may need to wait up to
+					   10 milliseconds before the internal
+					   processor is available again after
+					   you give it a command */
+#define SIOLX_NUMINTS 32
+
+struct siolx_board
+{
+	unsigned long flags;
+	unsigned long base;
+	unsigned char irq;
+	unsigned char DTR;
+	unsigned long vaddr;
+	unsigned long plx_vaddr;
+	unsigned long intstatus;
+	struct siolx_board *next_by_chain;		/* chains are circular */
+	struct siolx_board *next_by_interrupt;		/* only chip 0 */
+	struct siolx_board *next_by_global_list;	/* all boards not circular */
+	struct siolx_port *portlist;
+	struct pci_dev pdev;
+	unsigned int chipnumber;	/* for 8000X this structure really defines the board
+					 * for 16000X the chain corresponds to a board and each
+					 * structure corresponds to a chip on a single board */
+	unsigned int boardnumber;	/* same for all boards/chips in a board chain */
+	unsigned int boardtype;
+	unsigned int chiptype;
+	unsigned int chiprev;
+	unsigned int reario;
+	unsigned int rj45;
+};
+
+#define DRIVER_DEBUG() (siolx_debug)
+#define DEBUGPRINT(arg) if(DRIVER_DEBUG()) printk arg
+
+struct siolx_port
+{
+	int magic;
+	int baud_base;
+	int flags;
+	struct tty_struct * tty;
+	int count;
+	int blocked_open;
+	int event;
+	int timeout;
+	int close_delay;
+	long session;
+	long pgrp;
+	unsigned char * xmit_buf;
+	int custom_divisor;
+	int xmit_head;
+	int xmit_tail;
+	int xmit_cnt;
+	struct termios normal_termios;
+	struct termios callout_termios;
+	wait_queue_head_t open_wait;
+	wait_queue_head_t close_wait;
+	struct tq_struct tqueue;
+	struct tq_struct tqueue_hangup;
+	short wakeup_chars;
+	short break_length;
+	unsigned short closing_wait;
+	unsigned char mark_mask;
+	unsigned char IER;
+	unsigned char MSVR;
+	unsigned char COR2;
+#ifdef SIOLX_REPORT_OVERRUN
+	unsigned long overrun;
+#endif
+#ifdef SIOLX_REPORT_FIFO
+	unsigned long hits[10];
+#endif
+	struct siolx_port *next_by_global_list;
+	struct siolx_port *next_by_board;
+	struct siolx_board *board;
+	unsigned int boardport;		/* relative to chain 0-15 for 16000X */
+	unsigned int driverport;	/* maps to minor device number */
+};
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_SIOLX_H */
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/cd1865/Makefile linux.21pre4-ac2/drivers/char/cd1865/Makefile
--- linux.21pre4/drivers/char/cd1865/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux.21pre4-ac2/drivers/char/cd1865/Makefile	2003-01-09 01:03:03.000000000 +0000
@@ -0,0 +1,27 @@
+# Copyright (C) 2001 By Joachim Martillo, Telford Tools, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# File: drivers/net/WAN/atiXX50/Makefile
+#
+# Makefile for the Aurora ESSC based cards
+# Specifically the 2520, 4020, 4520, 8520
+#
+
+all: SILX.o
+
+O_TARGET := SILX.o
+
+obj-y := cd1865.o
+obj-m := $(O_TARGET)
+
+EXTRA_CFLAGS += -I.
+ +include $(TOPDIR)/Rules.make + +clean: + rm -f core *.o *.a *.s *~ + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/cd1865/plx9060.h linux.21pre4-ac2/drivers/char/cd1865/plx9060.h --- linux.21pre4/drivers/char/cd1865/plx9060.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/cd1865/plx9060.h 2003-01-09 01:07:57.000000000 +0000 @@ -0,0 +1,97 @@ +#ifndef _PLX9060_H_ +#define _PLX9060_H_ +/* + * Aurora Cirrus CL-CD180/1865 Async Driver (sio16) + * + * This module contains the definitions for the PLX + * 9060SD PCI controller chip. + * + * COPYRIGHT (c) 1996-1998 BY AURORA TECHNOLOGIES, INC., WALTHAM, MA. + * Modifications Copyright (C) 2002 By Telford Tools, Inc., Boston, MA. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + * file: plx9060.h + * author: cmw + * created: 11/21/1996 + * info: $Id: plx9060.h,v 1.2 2002/06/11 02:50:02 martillo Exp $ + */ + +/* + * $Log: plx9060.h,v $ + * Revision 1.2 2002/06/11 02:50:02 martillo + * using silx_ and SILX_ instead of sx_ and SX_ + * + * Revision 1.1 2002/05/21 17:30:16 martillo + * first pass for the sio16 driver. + * + * Revision 1.4 1999/02/12 15:38:13 bkd + * Changed PLX_ECNTUSER0 to PLX_ECNTLUSERO and added PLX_ECNTLUSERI. + * + * Revision 1.3 1998/03/23 19:35:42 bkd + * Added definitions for all of the missing PLX9060SD registers. + * + * Revision 1.2 1998/03/13 21:02:16 bkd + * Updated copyright date to include 1998. + * + * Revision 1.1 1996/11/23 01:07:46 bkd + * cmw/bkd (PCI port): + * Initial check-in. 
+ * + */ + +/* + * Register definitions + */ + +#define PLX_LAS0RR 0x00 +#define PLX_LAS0BAR 0x04 +#define PLX_LAR 0x08 +#define PLX_ENDR 0x0c +#define PLX_EROMRR 0x10 +#define PLX_EROMBAR 0x14 +#define PLX_LAS0BRD 0x18 +#define PLX_LAS1RR 0x30 +#define PLX_LAS1BAR 0x34 +#define PLX_LAS1BRD 0x38 + +#define PLX_MBR0 0x40 +#define PLX_MBR1 0x44 +#define PLX_MBR2 0x48 +#define PLX_MBR3 0x4c +#define PLX_PCI2LCLDBR 0x60 +#define PLX_LCL2PCIDBR 0x64 +#define PLX_ICSR 0x68 +#define PLX_ECNTL 0x6c + +/* + * Bit definitions + */ + +#define PLX_ECNTLUSERO 0x00010000 /* turn on user output */ +#define PLX_ECNTLUSERI 0x00020000 /* user input */ +#define PLX_ECNTLLDREG 0x20000000 /* reload configuration registers */ +#define PLX_ECNTLLCLRST 0x40000000 /* local bus reset */ +#define PLX_ECNTLINITSTAT 0x80000000 /* mark board init'ed */ + + +#define PLX_ICSRLSERR_ENA 0x00000001 /* enable local bus LSERR# */ +#define PLX_ICSRLSERRP_ENA 0x00000002 /* enable local bus LSERR# PCI */ +#define PLX_ICSRPCIINTS 0x00000100 /* enable PCI interrupts */ +#define PLX_ICSRLCLINTPCI 0x00000800 +#define PLX_ICSRINTACTIVE 0x00008000 /* RO: local interrupt active */ + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/cd1865/siolx.h linux.21pre4-ac2/drivers/char/cd1865/siolx.h --- linux.21pre4/drivers/char/cd1865/siolx.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/cd1865/siolx.h 2003-01-09 01:13:34.000000000 +0000 @@ -0,0 +1,94 @@ +/* -*- linux-c -*- */ +#ifndef _SIOLX_H_ +#define _SIOLX_H_ + +/* + * Modifications Copyright (C) 2002 By Telford Tools, Inc., Boston, MA. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, + * USA. + */ + +#define AURASUBSYSTEM_VENDOR_ID 0x125c +#define AURASUBSYSTEM_MPASYNCPCI 0x0640 +#define AURASUBSYSTEM_MPASYNCcPCI 0x0641 + +/* + * Aurora Cirrus CL-CD180/1865 Async Driver (sio16) + * + */ + +/* + * Register sets. These must match the order of the registers specified + * in the prom on the board! 
+ */
+
+#define MPASYNC_REG_CSR		1
+#define MPASYNC_REG_CD		2
+
+#define MPASYNC_CHIP1_OFFSET	0x080
+#define MPASYNC_CHIP2_OFFSET	0x100
+
+#define MPASYNC_REG_NO_OBP_CSR	1
+#define MPASYNC_REG_NO_OBP_CD	3
+
+#define TX_FIFO		0x8	/* how deep is the chip fifo */
+
+/*
+ * state flags
+ */
+
+/*
+ * the following defines the model types
+ */
+
+#define OREGANO_MODEL(mod)	((mod) == BD_16000P || (mod) == BD_8000P)
+#define MACE_MODEL(mod)		((mod) == BD_16000C || (mod) == BD_8000C)
+
+/*
+ * I/O options:
+ */
+
+#define MACE8_STD	0x0	/* 8000CP -- standard I/O */
+#define MACE8_RJ45	0x1	/* 8000CP -- rear RJ45 I/O */
+
+#define MACE16_STD	0x0	/* 16000CP -- standard I/O */
+#define MACE16_RJ45	0x1	/* 16000CP -- rear RJ45 I/O */
+
+#define SE2_CLK		((unsigned int) 11059200)	/* 11.0592 MHz */
+#define SE_CLK		((unsigned int) 14745600)	/* 14.7456 MHz */
+#define SE3_CLK		((unsigned int) 33000000)	/* 33.3333 MHz */
+
+/* divide x by y, rounded */
+#define ROUND_DIV(x, y)	(((x) + ((y) >> 1)) / (y))
+
+/* Calculate a 16 bit baud rate divisor for the given "encoded"
+ * (multiplied by two) baud rate.
+ */
+
+/* chip types: */
+#define CT_UNKNOWN	0x0	/* unknown */
+#define CT_CL_CD180	0x1	/* Cirrus Logic CD-180 */
+#define CT_CL_CD1864	0x2	/* Cirrus Logic CD-1864 */
+#define CT_CL_CD1865	0x3	/* Cirrus Logic CD-1865 */
+
+/* chip revisions: */
+#define CR_UNKNOWN	0x0	/* unknown */
+#define CR_REVA		0x1	/* revision A */
+#define CR_REVB		0x2	/* revision B */
+#define CR_REVC		0x3	/* revision C */
+/* ...and so on ... */
+
+#endif
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/Config.in linux.21pre4-ac2/drivers/char/Config.in
--- linux.21pre4/drivers/char/Config.in	2003-01-29 17:07:44.000000000 +0000
+++ linux.21pre4-ac2/drivers/char/Config.in	2003-01-28 16:43:43.000000000 +0000
@@ -35,6 +35,7 @@
 fi
 bool 'Non-standard serial port support' CONFIG_SERIAL_NONSTANDARD
 if [ "$CONFIG_SERIAL_NONSTANDARD" = "y" ]; then
+   tristate ' Aurora Technology, Inc.
asynchronous PCI cards V2' CONFIG_ATI_CD1865 tristate ' Computone IntelliPort Plus serial support' CONFIG_COMPUTONE tristate ' Comtrol Rocketport support' CONFIG_ROCKETPORT tristate ' Cyclades async mux support' CONFIG_CYCLADES @@ -212,6 +213,7 @@ bool ' Disable watchdog shutdown on close' CONFIG_WATCHDOG_NOWAYOUT tristate ' Acquire SBC Watchdog Timer' CONFIG_ACQUIRE_WDT tristate ' Advantech SBC Watchdog Timer' CONFIG_ADVANTECH_WDT + tristate ' ALi M7101 PMU on ALi 1535D+ Watchdog Timer' CONFIG_ALIM1535_WDT tristate ' ALi M7101 PMU Watchdog Timer' CONFIG_ALIM7101_WDT tristate ' AMD "Elan" SC520 Watchdog Timer' CONFIG_SC520_WDT tristate ' Berkshire Products PC Watchdog' CONFIG_PCWATCHDOG @@ -258,7 +260,7 @@ dep_tristate 'NatSemi SCx200 GPIO Support' CONFIG_SCx200_GPIO $CONFIG_SCx200 if [ "$CONFIG_X86" = "y" -o "$CONFIG_X86_64" = "y" ]; then - dep_tristate 'AMD 768 Random Number Generator support' CONFIG_AMD_RNG $CONFIG_PCI + dep_tristate 'AMD 768/8111 Random Number Generator support' CONFIG_AMD_RNG $CONFIG_PCI fi if [ "$CONFIG_X86" = "y" -o "$CONFIG_IA64" = "y" ]; then dep_tristate 'Intel i8x0 Random Number Generator support' CONFIG_INTEL_RNG $CONFIG_PCI diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/Config.in linux.21pre4-ac2/drivers/char/drm/Config.in --- linux.21pre4/drivers/char/drm/Config.in 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/Config.in 2003-01-06 15:38:22.000000000 +0000 @@ -6,7 +6,7 @@ # tristate ' 3dfx Banshee/Voodoo3+' CONFIG_DRM_TDFX -#tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA +tristate ' 3dlabs GMX 2000' CONFIG_DRM_GAMMA tristate ' ATI Rage 128' CONFIG_DRM_R128 dep_tristate ' ATI Radeon' CONFIG_DRM_RADEON $CONFIG_AGP dep_tristate ' Intel I810' CONFIG_DRM_I810 $CONFIG_AGP diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_agpsupport.h linux.21pre4-ac2/drivers/char/drm/drm_agpsupport.h --- linux.21pre4/drivers/char/drm/drm_agpsupport.h 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_agpsupport.h 2003-02-04 15:54:22.000000000 +0000 @@ -283,8 +283,6 @@ break; case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break; - case VIA_APOLLO_P4X400: head->chipset = "VIA Apollo P4X400"; - break; case SIS_GENERIC: head->chipset = "SiS"; break; case AMD_GENERIC: head->chipset = "AMD"; break; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_context.h linux.21pre4-ac2/drivers/char/drm/drm_context.h --- linux.21pre4/drivers/char/drm/drm_context.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_context.h 2003-02-04 15:54:22.000000000 +0000 @@ -554,7 +554,7 @@ /* Allocate a new queue */ down(&dev->struct_sem); - queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES); + queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES); memset(queue, 0, sizeof(*queue)); atomic_set(&queue->use_count, 1); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_dma.h linux.21pre4-ac2/drivers/char/drm/drm_dma.h --- linux.21pre4/drivers/char/drm/drm_dma.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_dma.h 2003-02-04 15:54:22.000000000 +0000 @@ -30,7 +30,7 @@ */ #include "drmP.h" - +#include "drm_os_linux.h" #include /* For task queue support */ #ifndef __HAVE_DMA_WAITQUEUE @@ -537,8 +537,16 @@ dev->tq.data = dev; #endif +#if __HAVE_VBL_IRQ + init_waitqueue_head(&dev->vbl_queue); + + spin_lock_init( 
&dev->vbl_lock ); + + INIT_LIST_HEAD( &dev->vbl_sigs.head ); +#endif + /* Before installing handler */ - DRIVER_PREINSTALL(); + DRM(driver_irq_preinstall)(dev); /* Install handler */ ret = request_irq( dev->irq, DRM(dma_service), @@ -551,7 +559,7 @@ } /* After installing handler */ - DRIVER_POSTINSTALL(); + DRM(driver_irq_postinstall)(dev); return 0; } @@ -570,7 +578,7 @@ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq ); - DRIVER_UNINSTALL(); + DRM(driver_irq_uninstall)( dev ); free_irq( irq, dev ); @@ -597,6 +605,117 @@ } } +#if __HAVE_VBL_IRQ + +int DRM(wait_vblank)(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_wait_vblank_t vblwait; + struct timeval now; + int ret = 0; + unsigned int flags; + + if (!dev->irq) + return -EINVAL; + + DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, + sizeof(vblwait) ); + + switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) { + case _DRM_VBLANK_RELATIVE: + vblwait.request.sequence += atomic_read( &dev->vbl_received ); + case _DRM_VBLANK_ABSOLUTE: + break; + default: + return -EINVAL; + } + + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + + if ( flags & _DRM_VBLANK_SIGNAL ) { + unsigned long irqflags; + drm_vbl_sig_t *vbl_sig = kmalloc( sizeof( drm_vbl_sig_t ), GFP_KERNEL ); + + if ( !vbl_sig ) + return -ENOMEM; + + memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) ); + + vbl_sig->sequence = vblwait.request.sequence; + vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->task = current; + + vblwait.reply.sequence = atomic_read( &dev->vbl_received ); + + /* Hook signal entry into list */ + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head ); + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + } else { + ret = DRM(vblank_wait)( dev, &vblwait.request.sequence ); + + do_gettimeofday( &now ); + vblwait.reply.tval_sec = now.tv_sec; + vblwait.reply.tval_usec = now.tv_usec; + } + + DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait, + sizeof(vblwait) ); + + return ret; +} + +void DRM(vbl_send_signals)( drm_device_t *dev ) +{ + struct list_head *entry, *tmp; + drm_vbl_sig_t *vbl_sig; + unsigned int vbl_seq = atomic_read( &dev->vbl_received ); + unsigned long flags; + + spin_lock_irqsave( &dev->vbl_lock, flags ); + + list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) { + + vbl_sig = (drm_vbl_sig_t *) entry; + + if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) { + + vbl_sig->info.si_code = atomic_read( &dev->vbl_received ); + send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task ); + + list_del( entry ); + + kfree( entry ); + } + } + + spin_unlock_irqrestore( &dev->vbl_lock, flags ); +} + +#endif /* __HAVE_VBL_IRQ */ + +#else + +int DRM(control)( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_control_t ctl; + + if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) ) + return -EFAULT; + + switch ( ctl.func ) { + case DRM_INST_HANDLER: + case DRM_UNINST_HANDLER: + return 0; + default: + return -EINVAL; + } +} + #endif /* __HAVE_DMA_IRQ */ #endif /* __HAVE_DMA */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_drv.h linux.21pre4-ac2/drivers/char/drm/drm_drv.h --- linux.21pre4/drivers/char/drm/drm_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_drv.h 2003-01-31 11:54:14.000000000 +0000 @@ -115,18 
+115,34 @@ #ifndef DRIVER_FOPS #define DRIVER_FOPS \ static struct file_operations DRM(fops) = { \ - owner: THIS_MODULE, \ - open: DRM(open), \ - flush: DRM(flush), \ - release: DRM(release), \ - ioctl: DRM(ioctl), \ - mmap: DRM(mmap), \ - read: DRM(read), \ - fasync: DRM(fasync), \ - poll: DRM(poll), \ + .owner = THIS_MODULE, \ + .open = DRM(open), \ + .flush = DRM(flush), \ + .release = DRM(release), \ + .ioctl = DRM(ioctl), \ + .mmap = DRM(mmap), \ + .read = DRM(read), \ + .fasync = DRM(fasync), \ + .poll = DRM(poll), \ } #endif +#ifndef MODULE +/* DRM(options) is called by the kernel to parse command-line options + * passed via the boot-loader (e.g., LILO). It calls the insmod option + * routine, drm_parse_drm. + */ +/* Use an additional macro to avoid preprocessor troubles */ +#define DRM_OPTIONS_FUNC DRM(options) +static int __init DRM(options)( char *str ) +{ + DRM(parse_options)( str ); + return 1; +} + +__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC ); +#undef DRM_OPTIONS_FUNC +#endif /* * The default number of instances (minor numbers) to initialize. @@ -187,10 +203,8 @@ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ -#if __HAVE_DMA_IRQ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 }, #endif -#endif #if __REALLY_HAVE_AGP [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 }, @@ -208,6 +222,10 @@ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 }, #endif +#if __HAVE_VBL_IRQ + [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 }, +#endif + DRIVER_IOCTLS }; @@ -292,7 +310,7 @@ dev->map_count = 0; dev->vmalist = NULL; - dev->lock.hw_lock = NULL; + dev->sigdata.lock = dev->lock.hw_lock = NULL; init_waitqueue_head( &dev->lock.lock_queue ); dev->queue_count = 0; dev->queue_reserved = 0; @@ -477,7 +495,7 @@ DRM(dma_takedown)( dev ); #endif if ( dev->lock.hw_lock ) { - dev->lock.hw_lock = NULL; /* SHM removed */ + dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.pid = 0; wake_up_interruptible( &dev->lock.lock_queue ); } @@ -705,7 +723,7 @@ int i; for (i = 0; i < DRM(numdevs); i++) { - if (MINOR(inode->i_rdev) == DRM(minor)[i]) { + if (minor(inode->i_rdev) == DRM(minor)[i]) { dev = &(DRM(device)[i]); break; } @@ -747,8 +765,8 @@ * Begin inline drm_release */ - DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n", - current->pid, dev->device, dev->open_count ); + DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)dev->device, dev->open_count ); if ( dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && @@ -873,8 +891,9 @@ atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); ++priv->ioctl_count; - DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n", - current->pid, cmd, nr, dev->device, priv->authenticated ); + DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", + current->pid, cmd, nr, (long)dev->device, + priv->authenticated ); if ( nr >= DRIVER_IOCTL_COUNT ) { retcode = -EINVAL; @@ -1027,6 +1046,25 @@ atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] ); +#if __HAVE_KERNEL_CTX_SWITCH + /* We no longer really hold it, but if we are the next + * agent to request it then we should just be able to + * take it immediately and not eat the ioctl. 
+ */ + dev->lock.pid = 0; + { + __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; + unsigned int old, new, prev, ctx; + + ctx = lock.context; + do { + old = *plock; + new = ctx; + prev = cmpxchg(plock, old, new); + } while (prev != old); + } + wake_up_interruptible(&dev->lock.lock_queue); +#else DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ); #if __HAVE_DMA_SCHEDULE @@ -1041,6 +1079,7 @@ DRM_ERROR( "\n" ); } } +#endif /* !__HAVE_KERNEL_CTX_SWITCH */ unblock_all_signals(); return 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_fops.h linux.21pre4-ac2/drivers/char/drm/drm_fops.h --- linux.21pre4/drivers/char/drm/drm_fops.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_fops.h 2003-02-04 15:54:22.000000000 +0000 @@ -37,7 +37,7 @@ int DRM(open_helper)(struct inode *inode, struct file *filp, drm_device_t *dev) { - kdev_t minor = MINOR(inode->i_rdev); + int minor = minor(inode->i_rdev); drm_file_t *priv; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ @@ -94,25 +94,8 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n", - current->pid, dev->device, dev->open_count); - if ( dev->lock.hw_lock && - _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.pid == current->pid ) { - DRM_DEBUG( "Process %d closed fd, freeing lock for context %d\n", - current->pid, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); -#if __HAVE_RELEASE - DRIVER_RELEASE(); -#endif - DRM(lock_free)( dev, &dev->lock.hw_lock->lock, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); - - /* FIXME: may require heavy-handed reset of - hardware at this point, possibly - processed via a callback to the X - server. 
*/ - } + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + current->pid, (long)dev->device, dev->open_count); return 0; } @@ -122,7 +105,7 @@ drm_device_t *dev = priv->dev; int retcode; - DRM_DEBUG("fd = %d, device = 0x%x\n", fd, dev->device); + DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, (long)dev->device); retcode = fasync_helper(fd, filp, on, &dev->buf_async); if (retcode < 0) return retcode; return 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm.h linux.21pre4-ac2/drivers/char/drm/drm.h --- linux.21pre4/drivers/char/drm/drm.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm.h 2003-01-31 11:54:14.000000000 +0000 @@ -38,10 +38,27 @@ #if defined(__linux__) #include #include /* For _IO* macros */ -#define DRM_IOCTL_NR(n) _IOC_NR(n) -#elif defined(__FreeBSD__) +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOC_VOID _IOC_NONE +#define DRM_IOC_READ _IOC_READ +#define DRM_IOC_WRITE _IOC_WRITE +#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE +#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) +#elif defined(__FreeBSD__) || defined(__NetBSD__) +#if defined(__FreeBSD__) && defined(XFree86Server) +/* Prevent name collision when including sys/ioccom.h */ +#undef ioctl #include -#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define ioctl(a,b,c) xf86ioctl(a,b,c) +#else +#include +#endif /* __FreeBSD__ && xf86ioctl */ +#define DRM_IOCTL_NR(n) ((n) & 0xff) +#define DRM_IOC_VOID IOC_VOID +#define DRM_IOC_READ IOC_OUT +#define DRM_IOC_WRITE IOC_IN +#define DRM_IOC_READWRITE IOC_INOUT +#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #endif #define XFREE86_VERSION(major,minor,patch,snap) \ @@ -84,6 +101,10 @@ /* Warning: If you change this structure, make sure you change * XF86DRIClipRectRec in the server as well */ +/* KW: Actually it's illegal to change either for + * backwards-compatibility reasons. 
+ */ + typedef struct drm_clip_rect { unsigned short x1; unsigned short y1; @@ -332,6 +353,32 @@ int funcnum; } drm_irq_busid_t; +typedef enum { + _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */ + _DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */ + _DRM_VBLANK_SIGNAL = 0x80000000 /* Send signal instead of blocking */ +} drm_vblank_seq_type_t; + +#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL + +struct drm_wait_vblank_request { + drm_vblank_seq_type_t type; + unsigned int sequence; + unsigned long signal; +}; + +struct drm_wait_vblank_reply { + drm_vblank_seq_type_t type; + unsigned int sequence; + long tval_sec; + long tval_usec; +}; + +typedef union drm_wait_vblank { + struct drm_wait_vblank_request request; + struct drm_wait_vblank_reply reply; +} drm_wait_vblank_t; + typedef struct drm_agp_mode { unsigned long mode; } drm_agp_mode_t; @@ -371,10 +418,9 @@ #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) -#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size) -#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size) -#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size) - +#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) +#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) +#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) #define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t) #define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t) @@ -427,86 +473,10 @@ #define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t) #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t) -/* MGA specific ioctls */ -#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) -#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) -#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) -#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) -#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) -#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) -#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) -#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) -#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) - -/* i810 specific ioctls */ -#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) -#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) -#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) -#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) -#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) -#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) -#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) -#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) -#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) -#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t) -#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a) -#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b) -#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t) -#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d ) - - -/* Rage 128 specific ioctls */ -#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) -#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) -#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) -#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) -#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) -#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) -#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) -#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t) -#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) -#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) -#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, 
drm_r128_blit_t) -#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) -#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) -#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) -#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) - -/* Radeon specific ioctls */ -#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) -#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) -#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) -#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) -#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) -#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) -#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) -#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) -#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) -#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) -#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) -#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) -#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) -#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) - -/* SiS specific ioctls */ -#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) -#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) -#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) -#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) -#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) -#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) -#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) -#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) - -/* I830 specific ioctls */ -#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t) -#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t) -#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t) -#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43) -#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44) -#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t) -#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) -#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) -#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) + +/* Device specfic ioctls should only be in their respective headers + * The device specific ioctl range is 0x40 to 0x79. */ +#define DRM_COMMAND_BASE 0x40 #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_ioctl.h linux.21pre4-ac2/drivers/char/drm/drm_ioctl.h --- linux.21pre4/drivers/char/drm/drm_ioctl.h 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_ioctl.h 2003-02-04 15:54:22.000000000 +0000 @@ -111,7 +111,7 @@ do { struct pci_dev *pci_dev; - int b, d, f; + int domain, b, d, f; char *p; for(p = dev->unique; p && *p && *p != ':'; p++); @@ -123,6 +123,27 @@ f = (int)simple_strtoul(p+1, &p, 10); if (*p) break; + domain = b >> 8; + b &= 0xff; + +#ifdef __alpha__ + /* + * Find the hose the device is on (the domain number is the + * hose index) and offset the bus by the root bus of that + * hose. 
+ */ + for(pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,NULL); + pci_dev; + pci_dev = pci_find_device(PCI_ANY_ID,PCI_ANY_ID,pci_dev)) { + struct pci_controller *hose = pci_dev->sysdata; + + if (hose->index == domain) { + b += hose->bus->number; + break; + } + } +#endif + pci_dev = pci_find_slot(b, PCI_DEVFN(d,f)); if (pci_dev) { dev->pdev = pci_dev; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_lock.h linux.21pre4-ac2/drivers/char/drm/drm_lock.h --- linux.21pre4/drivers/char/drm/drm_lock.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_lock.h 2003-02-04 15:54:22.000000000 +0000 @@ -236,7 +236,7 @@ /* Allow signal delivery if lock isn't held */ - if (!_DRM_LOCK_IS_HELD(s->lock->lock) + if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1; /* Otherwise, set flag to force call to diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_memory.h linux.21pre4-ac2/drivers/char/drm/drm_memory.h --- linux.21pre4/drivers/char/drm/drm_memory.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_memory.h 2003-02-04 15:54:22.000000000 +0000 @@ -313,6 +313,29 @@ return pt; } +void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size) +{ + void *pt; + + if (!size) { + DRM_MEM_ERROR(DRM_MEM_MAPPINGS, + "Mapping 0 bytes at 0x%08lx\n", offset); + return NULL; + } + + if (!(pt = ioremap_nocache(offset, size))) { + spin_lock(&DRM(mem_lock)); + ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; + spin_unlock(&DRM(mem_lock)); + return NULL; + } + spin_lock(&DRM(mem_lock)); + ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; + DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; + spin_unlock(&DRM(mem_lock)); + return pt; +} + void DRM(ioremapfree)(void *pt, unsigned long size) { int alloc_count; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_os_linux.h linux.21pre4-ac2/drivers/char/drm/drm_os_linux.h --- linux.21pre4/drivers/char/drm/drm_os_linux.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/drm/drm_os_linux.h 2003-02-04 15:54:22.000000000 +0000 @@ -0,0 +1,56 @@ +#define __NO_VERSION__ + +#include /* For task queue support */ +#include + + +/* For data going from/to the kernel through the ioctl argument */ +#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ + if ( copy_from_user(&arg1, arg2, arg3) ) \ + return -EFAULT +#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \ + if ( copy_to_user(arg1, &arg2, arg3) ) \ + return -EFAULT + + +#warning the author of this code needs to read up on list_entry +#define DRM_GETSAREA() \ +do { \ + struct list_head *list; \ + list_for_each( list, &dev->maplist->head ) { \ + drm_map_list_t *entry = (drm_map_list_t *)list; \ + if ( entry->map && \ + entry->map->type == _DRM_SHM && \ + (entry->map->flags & _DRM_CONTAINS_LOCK) ) { \ + dev_priv->sarea = entry->map; \ + break; \ + } \ + } \ +} while (0) + +#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ +do { \ + DECLARE_WAITQUEUE(entry, current); \ + unsigned long end = jiffies + (timeout); \ + add_wait_queue(&(queue), &entry); \ + \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if((signed)(end - jiffies) <= 0) { \ + ret = -EBUSY; \ + break; \ + } \ + schedule_timeout((HZ/100 > 1) ? 
HZ/100 : 1); \ + if (signal_pending(current)) { \ + ret = -EINTR; \ + break; \ + } \ + } \ + set_current_state(TASK_RUNNING); \ + remove_wait_queue(&(queue), &entry); \ +} while (0) + + + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drmP.h linux.21pre4-ac2/drivers/char/drm/drmP.h --- linux.21pre4/drivers/char/drm/drmP.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drmP.h 2003-02-04 15:54:22.000000000 +0000 @@ -53,6 +53,7 @@ #include #include /* For (un)lock_kernel */ #include +#include #if defined(__alpha__) || defined(__powerpc__) #include /* For pte_wrprotect */ #endif @@ -71,10 +72,7 @@ #include #include "drm.h" -/* page_to_bus for earlier kernels, not optimal in all cases */ -#ifndef page_to_bus -#define page_to_bus(page) ((unsigned int)(virt_to_bus(page_address(page)))) -#endif +#include "drm_os_linux.h" /* DRM template customization defaults */ @@ -209,6 +207,7 @@ (unsigned long)(n),sizeof(*(ptr)))) #endif /* i386 & alpha */ #endif +#define __REALLY_HAVE_SG (__HAVE_SG) /* Begin the DRM... */ @@ -251,41 +250,58 @@ #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) -#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) + /* Backward compatibility section */ +#ifndef minor +#define minor(x) MINOR((x)) +#endif -/* Macros to make printk easier */ +#ifndef MODULE_LICENSE +#define MODULE_LICENSE(x) +#endif -#if ( __GNUC__ > 2 ) -#define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__, ##arg) -#define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ - DRM(mem_stats)[area].name , ##arg) -#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) +#ifndef pte_offset_map +#define pte_offset_map pte_offset +#define pte_unmap(pte) +#endif -#if DRM_DEBUG_CODE -#define DRM_DEBUG(fmt, arg...) \ - do { \ - if ( DRM(flags) & DRM_FLAG_DEBUG ) \ - printk(KERN_DEBUG \ - "[" DRM_NAME ":%s] " fmt , \ - __FUNCTION__, \ - ##arg); \ - } while (0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) +static inline struct page * vmalloc_to_page(void * vmalloc_addr) +{ + unsigned long addr = (unsigned long) vmalloc_addr; + struct page *page = NULL; + pgd_t *pgd = pgd_offset_k(addr); + pmd_t *pmd; + pte_t *ptep, pte; + + if (!pgd_none(*pgd)) { + pmd = pmd_offset(pgd, addr); + if (!pmd_none(*pmd)) { + ptep = pte_offset_map(pmd, addr); + pte = *ptep; + if (pte_present(pte)) + page = pte_page(pte); + pte_unmap(ptep); + } + } + return page; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) +#define DRM_RPR_ARG(vma) #else -#define DRM_DEBUG(fmt, arg...) do { } while (0) +#define DRM_RPR_ARG(vma) vma, #endif -#else /* Gcc 2.x */ -/* Work around a C preprocessor bug */ +#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) /* Macros to make printk easier */ #define DRM_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg) + printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) #define DRM_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \ + printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ DRM(mem_stats)[area].name , ##arg) #define DRM_INFO(fmt, arg...) 
printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) @@ -294,16 +310,13 @@ do { \ if ( DRM(flags) & DRM_FLAG_DEBUG ) \ printk(KERN_DEBUG \ - "[" DRM_NAME ":" __FUNCTION__ "] " fmt , \ - ##arg); \ + "[" DRM_NAME ":%s] " fmt , \ + __FUNCTION__ , ##arg); \ } while (0) #else #define DRM_DEBUG(fmt, arg...) do { } while (0) #endif -#endif /* Gcc 2.x */ - - #define DRM_PROC_LIMIT (PAGE_SIZE-80) #define DRM_PROC_PRINT(fmt, arg...) \ @@ -318,6 +331,9 @@ #define DRM_IOREMAP(map) \ (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) +#define DRM_IOREMAP_NOCACHE(map) \ + (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size) + #define DRM_IOREMAPFREE(map) \ do { \ if ( (map)->handle && (map)->size ) \ @@ -599,6 +615,17 @@ drm_map_t *map; } drm_map_list_t; +#if __HAVE_VBL_IRQ + +typedef struct drm_vbl_sig { + struct list_head head; + unsigned int sequence; + struct siginfo info; + struct task_struct *task; +} drm_vbl_sig_t; + +#endif + typedef struct drm_device { const char *name; /* Simple driver name */ char *unique; /* Unique identifier: e.g., busid */ @@ -658,6 +685,12 @@ int last_context; /* Last current context */ unsigned long last_switch; /* jiffies at last context switch */ struct tq_struct tq; +#if __HAVE_VBL_IRQ + wait_queue_head_t vbl_queue; + atomic_t vbl_received; + spinlock_t vbl_lock; + drm_vbl_sig_t vbl_sigs; +#endif cycles_t ctx_start; cycles_t lck_start; #if __HAVE_DMA_HISTOGRAM @@ -725,16 +758,16 @@ /* Mapping support (drm_vm.h) */ extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma, unsigned long address, - int unused); + int write_access); extern void DRM(vm_open)(struct vm_area_struct *vma); extern void DRM(vm_close)(struct vm_area_struct *vma); extern void DRM(vm_shm_close)(struct vm_area_struct *vma); @@ -756,6 +789,7 @@ extern void DRM(free_pages)(unsigned long address, int order, int area); extern void *DRM(ioremap)(unsigned long offset, unsigned long size); +extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size); extern void DRM(ioremapfree)(void *pt, unsigned long size); #if __REALLY_HAVE_AGP @@ -885,6 +919,15 @@ extern int DRM(irq_uninstall)( drm_device_t *dev ); extern void DRM(dma_service)( int irq, void *device, struct pt_regs *regs ); +extern void DRM(driver_irq_preinstall)( drm_device_t *dev ); +extern void DRM(driver_irq_postinstall)( drm_device_t *dev ); +extern void DRM(driver_irq_uninstall)( drm_device_t *dev ); +#if __HAVE_VBL_IRQ +extern int DRM(wait_vblank)(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq); +extern void DRM(vbl_send_signals)( drm_device_t *dev ); +#endif #if __HAVE_DMA_IRQ_BH extern void DRM(dma_immediate_bh)( void *dev ); #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_proc.h linux.21pre4-ac2/drivers/char/drm/drm_proc.h --- linux.21pre4/drivers/char/drm/drm_proc.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_proc.h 2003-02-04 15:54:22.000000000 +0000 @@ -147,10 +147,10 @@ *eof = 0; if (dev->unique) { - DRM_PROC_PRINT("%s 0x%x %s\n", - dev->name, 
dev->device, dev->unique); + DRM_PROC_PRINT("%s 0x%lx %s\n", + dev->name, (long)dev->device, dev->unique); } else { - DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device); + DRM_PROC_PRINT("%s 0x%lx\n", dev->name, (long)dev->device); } if (len > request + offset) return request; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_sarea.h linux.21pre4-ac2/drivers/char/drm/drm_sarea.h --- linux.21pre4/drivers/char/drm/drm_sarea.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/drm/drm_sarea.h 2003-01-06 17:25:49.000000000 +0000 @@ -0,0 +1,57 @@ +/* sarea.h -- SAREA definitions -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Michel Dänzer + */ + +#ifndef _DRM_SAREA_H_ +#define _DRM_SAREA_H_ + +#define SAREA_MAX_DRAWABLES 256 + +typedef struct _drm_sarea_drawable_t { + unsigned int stamp; + unsigned int flags; +} drm_sarea_drawable_t; + +typedef struct _dri_sarea_frame_t { + unsigned int x; + unsigned int y; + unsigned int width; + unsigned int height; + unsigned int fullscreen; +} drm_sarea_frame_t; + +typedef struct _drm_sarea_t { + /* first thing is always the drm locking structure */ + drm_hw_lock_t lock; + /* NOT_DONE: Use readers/writer lock for drawable_lock */ + drm_hw_lock_t drawable_lock; + drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; + drm_sarea_frame_t frame; + drm_context_t dummy_context; +} drm_sarea_t; + +#endif /* _DRM_SAREA_H_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_stub.h linux.21pre4-ac2/drivers/char/drm/drm_stub.h --- linux.21pre4/drivers/char/drm/drm_stub.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_stub.h 2003-02-04 15:54:22.000000000 +0000 @@ -48,7 +48,7 @@ static int DRM(stub_open)(struct inode *inode, struct file *filp) { - int minor = MINOR(inode->i_rdev); + int minor = minor(inode->i_rdev); int err = -ENODEV; struct file_operations *old_fops; @@ -65,8 +65,8 @@ } static struct file_operations DRM(stub_fops) = { - owner: THIS_MODULE, - open: DRM(stub_open) + .owner = THIS_MODULE, + .open = DRM(stub_open) }; static int DRM(stub_getminor)(const char *name, struct file_operations *fops, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/drm_vm.h linux.21pre4-ac2/drivers/char/drm/drm_vm.h --- linux.21pre4/drivers/char/drm/drm_vm.h 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/drm_vm.h 2003-02-04 15:54:22.000000000 +0000 @@ -70,7 +70,7 @@ * Find the right map */ - if(!dev->agp->cant_use_aperture) goto vm_nopage_error; + if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error; list_for_each(list, &dev->maplist->head) { r_list = (drm_map_list_t *)list; @@ -141,9 +141,7 @@ return NOPAGE_OOM; get_page(page); -#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. */ - DRM_DEBUG("0x%08lx => 0x%08llx\n", address, (u64)page_to_bus(page)); -#endif + DRM_DEBUG("shm_nopage 0x%lx\n", address); return page; } @@ -245,10 +243,7 @@ get_page(page); -#if 0 /* XXX page_to_bus is not a portable interface available on all platforms. 
*/ - DRM_DEBUG("0x%08lx (page %lu) => 0x%08llx\n", address, page_nr, - (u64)page_to_bus(page)); -#endif + DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr); return page; } @@ -449,12 +444,12 @@ } offset = DRIVER_GET_REG_OFS(); #ifdef __sparc__ - if (io_remap_page_range(vma->vm_start, + if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma) + offset, vma->vm_end - vma->vm_start, vma->vm_page_prot, 0)) #else - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma) + offset, vma->vm_end - vma->vm_start, vma->vm_page_prot)) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/gamma_dma.c linux.21pre4-ac2/drivers/char/drm/gamma_dma.c --- linux.21pre4/drivers/char/drm/gamma_dma.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/gamma_dma.c 2003-01-31 11:54:14.000000000 +0000 @@ -31,33 +31,32 @@ #include "gamma.h" #include "drmP.h" +#include "drm.h" +#include "gamma_drm.h" #include "gamma_drv.h" #include /* For task queue support */ #include - static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, unsigned long length) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address)); - while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) - ; + (drm_gamma_private_t *)dev->dev_private; + mb(); + while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); + GAMMA_WRITE(GAMMA_DMAADDRESS, address); + while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) cpu_relax(); GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); } void gamma_dma_quiescent_single(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); @@ -71,56 +70,50 @@ void gamma_dma_quiescent_dual(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; - while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) - ; + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); - GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); GAMMA_WRITE(GAMMA_SYNC, 0); - /* Read from first MX */ + /* Read from first MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) cpu_relax(); } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); - /* Read from second MX */ + /* Read from second MX */ do { - while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) - ; + while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) cpu_relax(); } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG); } void gamma_dma_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - while (GAMMA_READ(GAMMA_DMACOUNT)) - ; + (drm_gamma_private_t *)dev->dev_private; + while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax(); } static inline int gamma_dma_is_ready(drm_device_t *dev) { drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; - - return !GAMMA_READ(GAMMA_DMACOUNT); + (drm_gamma_private_t *)dev->dev_private; + return(!GAMMA_READ(GAMMA_DMACOUNT)); } 
void gamma_dma_service(int irq, void *device, struct pt_regs *regs) { - drm_device_t *dev = (drm_device_t *)device; - drm_device_dma_t *dma = dev->dma; + drm_device_t *dev = (drm_device_t *)device; + drm_device_dma_t *dma = dev->dma; drm_gamma_private_t *dev_priv = - (drm_gamma_private_t *)dev->dev_private; + (drm_gamma_private_t *)dev->dev_private; atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */ + + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */ GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8); GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001); @@ -164,7 +157,9 @@ } buf = dma->next_buffer; - address = (unsigned long)buf->address; + /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */ + /* So we pass the buffer index value into the physical page offset */ + address = buf->idx << 12; length = buf->used; DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", @@ -231,6 +226,9 @@ buf->time_dispatched = get_cycles(); #endif + /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */ + address = buf->idx << 12; + gamma_dma_dispatch(dev, address, length); gamma_free_buffer(dev, dma->this_buffer); dma->this_buffer = buf; @@ -523,11 +521,11 @@ } } if (retcode) { - DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n", + DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d %d/%d\n", d->context, last_buf->waiting, last_buf->pending, - DRM_WAITCOUNT(dev, d->context), + (long)DRM_WAITCOUNT(dev, d->context), last_buf->idx, last_buf->list, last_buf->pid, @@ -581,3 +579,267 @@ return retcode; } + +/* ============================================================= + * DMA initialization, cleanup + */ + +static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init ) +{ + drm_gamma_private_t *dev_priv; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + int i; + struct list_head *list; + unsigned long *pgt; + + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t), + DRM_MEM_DRIVER ); + if ( !dev_priv ) + return -ENOMEM; + + dev->dev_private = (void *)dev_priv; + + memset( dev_priv, 0, sizeof(drm_gamma_private_t) ); + + list_for_each(list, &dev->maplist->head) { + #warning list_entry() is needed here + drm_map_list_t *r_list = (drm_map_list_t *)list; + if( r_list->map && + r_list->map->type == _DRM_SHM && + r_list->map->flags & _DRM_CONTAINS_LOCK ) { + dev_priv->sarea = r_list->map; + break; + } + } + + DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 ); + DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 ); + DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 ); + DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 ); + + dev_priv->sarea_priv = (drm_gamma_sarea_t *) + ((u8 *)dev_priv->sarea->handle + + init->sarea_priv_offset); + + if (init->pcimode) { + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + pgt = buf->address; + + for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { + buf = dma->buflist[i]; + *pgt = virt_to_phys((void*)buf->address) | 0x07; + pgt++; + } + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + } else { + DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset ); + + DRM_IOREMAP( dev_priv->buffers ); + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + pgt = buf->address; + + for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { + buf = dma->buflist[i]; + *pgt = (unsigned int)buf->address + 0x07; + pgt++; + } + + buf = dma->buflist[GLINT_DRI_BUF_COUNT]; + + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1) cpu_relax(); + GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe) ; + } + while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2); cpu_relax(); + GAMMA_WRITE( GAMMA_PAGETABLEADDR, 
virt_to_phys((void*)buf->address) ); + GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 ); + + return 0; +} + +int gamma_do_cleanup_dma( drm_device_t *dev ) +{ + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + if ( dev->dev_private ) { + drm_gamma_private_t *dev_priv = dev->dev_private; + + DRM_IOREMAPFREE( dev_priv->buffers ); + + DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t), + DRM_MEM_DRIVER ); + dev->dev_private = NULL; + } + + return 0; +} + +int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_gamma_init_t init; + + if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) ) + return -EFAULT; + + switch ( init.func ) { + case GAMMA_INIT_DMA: + return gamma_do_init_dma( dev, &init ); + case GAMMA_CLEANUP_DMA: + return gamma_do_cleanup_dma( dev ); + } + + return -EINVAL; +} + +static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy ) +{ + drm_device_dma_t *dma = dev->dma; + unsigned int *screenbuf; + + DRM_DEBUG( "%s\n", __FUNCTION__ ); + + /* We've DRM_RESTRICTED this DMA buffer */ + + screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address; + +#if 0 + *buffer++ = 0x180; /* Tag (FilterMode) */ + *buffer++ = 0x200; /* Allow FBColor through */ + *buffer++ = 0x53B; /* Tag */ + *buffer++ = copy->Pitch; + *buffer++ = 0x53A; /* Tag */ + *buffer++ = copy->SrcAddress; + *buffer++ = 0x539; /* Tag */ + *buffer++ = copy->WidthHeight; /* Initiates transfer */ + *buffer++ = 0x53C; /* Tag - DMAOutputAddress */ + *buffer++ = virt_to_phys((void*)screenbuf); + *buffer++ = 0x53D; /* Tag - DMAOutputCount */ + *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/ + + /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */ + /* Now put it back to the screen */ + + *buffer++ = 0x180; /* Tag (FilterMode) */ + *buffer++ = 0x400; /* Allow Sync through */ + *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */ + *buffer++ = 0x155; /* FBSourceData | count */ + *buffer++ = 0x537; /* Tag */ + *buffer++ = copy->Pitch; + *buffer++ = 0x536; /* Tag */ + *buffer++ = copy->DstAddress; + *buffer++ = 0x535; /* Tag */ + *buffer++ = copy->WidthHeight; /* Initiates transfer */ + *buffer++ = 0x530; /* Tag - DMAAddr */ + *buffer++ = virt_to_phys((void*)screenbuf); + *buffer++ = 0x531; + *buffer++ = copy->Count; /* initiates DMA transfer of color data */ +#endif + + /* need to dispatch it now */ + + return 0; +} + +int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_gamma_copy_t copy; + + if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) ) + return -EFAULT; + + return gamma_do_copy_dma( dev, &copy ); +} + +/* ============================================================= + * Per Context SAREA Support + */ + +int gamma_getsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + + map = dev->context_sareas[request.ctx_id]; + up(&dev->struct_sem); + + request.handle = map->handle; + if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, 
sizeof(request))) + return -EFAULT; + return 0; +} + +int gamma_setsareactx(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_ctx_priv_map_t request; + drm_map_t *map = NULL; + drm_map_list_t *r_list; + struct list_head *list; + + if (copy_from_user(&request, + (drm_ctx_priv_map_t *)arg, + sizeof(request))) + return -EFAULT; + + down(&dev->struct_sem); + r_list = NULL; + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *)list; + if(r_list->map && + r_list->map->handle == request.handle) break; + } + if (list == &(dev->maplist->head)) { + up(&dev->struct_sem); + return -EINVAL; + } + map = r_list->map; + up(&dev->struct_sem); + + if (!map) return -EINVAL; + + down(&dev->struct_sem); + if ((int)request.ctx_id >= dev->max_context) { + up(&dev->struct_sem); + return -EINVAL; + } + dev->context_sareas[request.ctx_id] = map; + up(&dev->struct_sem); + return 0; +} + +/* drm_dma.h hooks +*/ +void DRM(driver_irq_preinstall)( drm_device_t *dev ) { +} + +void DRM(driver_irq_postinstall)( drm_device_t *dev ) { +} + +void DRM(driver_irq_uninstall)( drm_device_t *dev ) { +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/gamma_drm.h linux.21pre4-ac2/drivers/char/drm/gamma_drm.h --- linux.21pre4/drivers/char/drm/gamma_drm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/drm/gamma_drm.h 2003-01-06 17:25:49.000000000 +0000 @@ -0,0 +1,89 @@ +#ifndef _GAMMA_DRM_H_ +#define _GAMMA_DRM_H_ + +typedef struct _drm_gamma_tex_region { + unsigned char next, prev; /* indices to form a circular LRU */ + unsigned char in_use; /* owned by a client, or free? */ + int age; /* tracked by clients to update local LRU's */ +} drm_gamma_tex_region_t; + +typedef struct { + unsigned int GDeltaMode; + unsigned int GDepthMode; + unsigned int GGeometryMode; + unsigned int GTransformMode; +} drm_gamma_context_regs_t; + +typedef struct _drm_gamma_sarea { + drm_gamma_context_regs_t context_state; + + unsigned int dirty; + + + /* Maintain an LRU of contiguous regions of texture space. If + * you think you own a region of texture memory, and it has an + * age different to the one you set, then you are mistaken and + * it has been stolen by another client. If global texAge + * hasn't changed, there is no need to walk the list. + * + * These regions can be used as a proxy for the fine-grained + * texture information of other clients - by maintaining them + * in the same lru which is used to age their own textures, + * clients have an approximate lru for the whole of global + * texture space, and can make informed decisions as to which + * areas to kick out. There is no need to choose whether to + * kick out your own texture or someone else's - simply eject + * them all in LRU order. + */ + +#define GAMMA_NR_TEX_REGIONS 64 + drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; + /* Last elt is sentinal */ + int texAge; /* last time texture was uploaded */ + int last_enqueue; /* last time a buffer was enqueued */ + int last_dispatch; /* age of the most recently dispatched buffer */ + int last_quiescent; /* */ + int ctxOwner; /* last context to upload state */ + + int vertex_prim; +} drm_gamma_sarea_t; + +/* WARNING: If you change any of these defines, make sure to wear a bullet + * proof vest because these are part of the stable kernel<->userspace ABI + */ + +/* Gamma specific ioctls + * The device specific ioctl range is 0x40 to 0x79. 
+ */ +#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t) +#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t) + +typedef struct drm_gamma_copy { + unsigned int DMAOutputAddress; + unsigned int DMAOutputCount; + unsigned int DMAReadGLINTSource; + unsigned int DMARectangleWriteAddress; + unsigned int DMARectangleWriteLinePitch; + unsigned int DMARectangleWrite; + unsigned int DMARectangleReadAddress; + unsigned int DMARectangleReadLinePitch; + unsigned int DMARectangleRead; + unsigned int DMARectangleReadTarget; +} drm_gamma_copy_t; + +typedef struct drm_gamma_init { + enum { + GAMMA_INIT_DMA = 0x01, + GAMMA_CLEANUP_DMA = 0x02 + } func; + + int sarea_priv_offset; + int pcimode; + unsigned int mmio0; + unsigned int mmio1; + unsigned int mmio2; + unsigned int mmio3; + unsigned int buffers_offset; +} drm_gamma_init_t; + +#endif /* _GAMMA_DRM_H_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/gamma_drv.c linux.21pre4-ac2/drivers/char/drm/gamma_drv.c --- linux.21pre4/drivers/char/drm/gamma_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/gamma_drv.c 2003-01-06 17:25:49.000000000 +0000 @@ -32,57 +32,18 @@ #include #include "gamma.h" #include "drmP.h" +#include "drm.h" +#include "gamma_drm.h" #include "gamma_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." - -#define DRIVER_NAME "gamma" -#define DRIVER_DESC "3DLabs gamma" -#define DRIVER_DATE "20010216" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 } - - -#define __HAVE_COUNTERS 5 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_DMA -#define __HAVE_COUNTER8 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL -#define __HAVE_COUNTER10 _DRM_STAT_MISSED - - #include "drm_auth.h" +#include "drm_agpsupport.h" #include "drm_bufs.h" #include "drm_context.h" #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init gamma_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", gamma_options ); -#endif - - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/gamma_drv.h linux.21pre4-ac2/drivers/char/drm/gamma_drv.h --- linux.21pre4/drivers/char/drm/gamma_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/gamma_drv.h 2003-01-06 17:25:58.000000000 +0000 @@ -32,8 +32,9 @@ #ifndef _GAMMA_DRV_H_ #define _GAMMA_DRV_H_ - typedef struct drm_gamma_private { + drm_gamma_sarea_t *sarea_priv; + drm_map_t *sarea; drm_map_t *buffers; drm_map_t *mmio0; drm_map_t *mmio1; @@ -51,6 +52,11 @@ } \ } while (0) + /* gamma_dma.c */ +extern int gamma_dma_init( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int gamma_dma_copy( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); extern void gamma_dma_ready(drm_device_t *dev); extern void gamma_dma_quiescent_single(drm_device_t *dev); @@ -63,6 +69,7 @@ extern int gamma_find_devices(void); extern int gamma_found(void); +#define GLINT_DRI_BUF_COUNT 256 #define GAMMA_OFF(reg) \ ((reg < 0x1000) \ @@ -78,7 +85,6 @@ ((reg < 0x10000) ? dev_priv->mmio1->handle : \ ((reg < 0x11000) ? dev_priv->mmio2->handle : \ dev_priv->mmio3->handle)))) - #define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg)) #define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg) #define GAMMA_READ(reg) GAMMA_DEREF(reg) @@ -91,9 +97,11 @@ #define GAMMA_FILTERMODE 0x8c00 #define GAMMA_GCOMMANDINTFLAGS 0x0c50 #define GAMMA_GCOMMANDMODE 0x0c40 +#define GAMMA_QUEUED_DMA_MODE 1<<1 #define GAMMA_GCOMMANDSTATUS 0x0c60 #define GAMMA_GDELAYTIMER 0x0c38 #define GAMMA_GDMACONTROL 0x0060 +#define GAMMA_USE_AGP 1<<1 #define GAMMA_GINTENABLE 0x0808 #define GAMMA_GINTFLAGS 0x0810 #define GAMMA_INFIFOSPACE 0x0018 @@ -101,5 +109,12 @@ #define GAMMA_OUTPUTFIFO 0x2000 #define GAMMA_SYNC 0x8c40 #define GAMMA_SYNC_TAG 0x0188 +#define GAMMA_PAGETABLEADDR 0x0C00 +#define GAMMA_PAGETABLELENGTH 0x0C08 + +#define GAMMA_PASSTHROUGH 0x1FE +#define GAMMA_DMAADDRTAG 0x530 +#define GAMMA_DMACOUNTTAG 0x531 +#define GAMMA_COMMANDINTTAG 0x532 #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/gamma.h linux.21pre4-ac2/drivers/char/drm/gamma.h --- linux.21pre4/drivers/char/drm/gamma.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/gamma.h 2003-01-06 17:25:58.000000000 +0000 @@ -38,9 +38,36 @@ */ #define __HAVE_MTRR 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." 
+ +#define DRIVER_NAME "gamma" +#define DRIVER_DESC "3DLabs gamma" +#define DRIVER_DATE "20010624" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { gamma_dma, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_INIT)] = { gamma_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_GAMMA_COPY)] = { gamma_dma_copy, 1, 1 } + +#define IOCTL_TABLE_NAME DRM(ioctls) +#define IOCTL_FUNC_NAME DRM(ioctl) + +#define __HAVE_COUNTERS 5 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_DMA +#define __HAVE_COUNTER8 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER9 _DRM_STAT_SPECIAL +#define __HAVE_COUNTER10 _DRM_STAT_MISSED + /* DMA customization: */ #define __HAVE_DMA 1 +#define __HAVE_AGP 1 +#define __MUST_HAVE_AGP 0 #define __HAVE_OLD_DMA 1 #define __HAVE_PCI_DMA 1 @@ -61,33 +88,61 @@ #define __HAVE_DMA_QUIESCENT 1 #define DRIVER_DMA_QUIESCENT() do { \ /* FIXME ! */ \ - gamma_dma_quiescent_dual(dev); \ + gamma_dma_quiescent_single(dev); \ return 0; \ } while (0) #define __HAVE_DMA_IRQ 1 #define __HAVE_DMA_IRQ_BH 1 + +#if 1 #define DRIVER_PREINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ - GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 ); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); \ GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); \ } while (0) - #define DRIVER_POSTINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); \ } while (0) +#else +#define DRIVER_POSTINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002000 ); \ + GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000004 ); \ +} while (0) + +#define DRIVER_PREINSTALL() do { \ + drm_gamma_private_t *dev_priv = \ + (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + GAMMA_WRITE( GAMMA_GCOMMANDMODE, GAMMA_QUEUED_DMA_MODE );\ + GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );\ +} while (0) +#endif #define DRIVER_UNINSTALL() do { \ drm_gamma_private_t *dev_priv = \ (drm_gamma_private_t *)dev->dev_private;\ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax(); \ + while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax(); \ GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); \ GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); \ GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); \ } while (0) +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ + ((drm_gamma_private_t *)((dev)->dev_private))->buffers + #endif /* __GAMMA_H__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i810_dma.c linux.21pre4-ac2/drivers/char/drm/i810_dma.c --- linux.21pre4/drivers/char/drm/i810_dma.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i810_dma.c 2003-01-06 17:26:33.000000000 +0000 @@ -26,21 +26,20 @@ * * Authors: Rickard E. 
(Rik) Faith * Jeff Hartmann - * Keith Whitwell + * Keith Whitwell * */ #include #include "i810.h" #include "drmP.h" +#include "drm.h" +#include "i810_drm.h" #include "i810_drv.h" #include /* For task queue support */ -#include +#include -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 @@ -51,29 +50,27 @@ #define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define BEGIN_LP_RING(n) do { \ - if (I810_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (dev_priv->ring.space < n*4) \ - i810_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ +#define BEGIN_LP_RING(n) do { \ + if (0) DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ } while (0) -#define ADVANCE_LP_RING() do { \ - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I810_WRITE(LP_RING + RING_TAIL, outring); \ +#define ADVANCE_LP_RING() do { \ + if (0) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -#define OUT_RING(n) do { \ - if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ +#define OUT_RING(n) do { \ + if (0) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ } while (0) static inline void i810_print_status_page(drm_device_t *dev) @@ -135,14 +132,14 @@ } static struct file_operations i810_buffer_fops = { - open: DRM(open), - flush: DRM(flush), - release: DRM(release), - ioctl: DRM(ioctl), - mmap: i810_mmap_buffers, - read: DRM(read), - fasync: DRM(fasync), - poll: DRM(poll), + .open = DRM(open), + .flush = DRM(flush), + .release = DRM(release), + .ioctl = DRM(ioctl), + .mmap = i810_mmap_buffers, + .read = DRM(read), + .fasync = DRM(fasync), + .poll = DRM(poll), }; int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) @@ -165,7 +162,7 @@ buf_priv->currently_mapped = I810_BUF_MAPPED; unlock_kernel(); - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; @@ -183,28 +180,31 @@ if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { - down_write( &current->mm->mmap_sem ); - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, - PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } - up_write( &current->mm->mmap_sem ); - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I810_BUF_MAPPED; + + + + down_write( &current->mm->mmap_sem ); + + old_fops = filp->f_op; + filp->f_op = 
&i810_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_DEBUG("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; } + + + + up_write( &current->mm->mmap_sem ); + return retcode; } @@ -213,15 +213,21 @@ drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I810_BUF_MAPPED) - return -EINVAL; - down_write( &current->mm->mmap_sem ); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); - up_write( &current->mm->mmap_sem ); - } + if(buf_priv->currently_mapped != I810_BUF_MAPPED) + return -EINVAL; + + + + down_write( &current->mm->mmap_sem ); + + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + + + + up_write( &current->mm->mmap_sem ); + buf_priv->currently_mapped = I810_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -273,8 +279,9 @@ dev_priv->ring.Size); } if(dev_priv->hw_status_page != 0UL) { - pci_free_consistent(dev->pdev, PAGE_SIZE, (void *)dev_priv->hw_status_page, - dev_priv->dma_status_page); + pci_free_consistent(dev->pdev, PAGE_SIZE, + (void *)dev_priv->hw_status_page, + dev_priv->dma_status_page); /* Need to rewrite hardware status page */ I810_WRITE(0x02080, 0x1ffff000); } @@ -301,8 +308,6 @@ end = jiffies + (HZ*3); while (ring->space < n) { - int i; - ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; @@ -317,7 +322,7 @@ goto out_wait_ring; } - for (i = 0 ; i < 2000 ; i++) ; + udelay(1); } out_wait_ring: @@ -405,9 +410,6 @@ ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -440,8 +442,9 @@ dev_priv->zi1 = init->depth_offset | init->pitch_bits; /* Program Hardware Status Page */ - dev_priv->hw_status_page = (unsigned long)pci_alloc_consistent(dev->pdev, PAGE_SIZE, - &dev_priv->dma_status_page); + dev_priv->hw_status_page = + (unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE, + &dev_priv->dma_status_page); if(dev_priv->hw_status_page == 0UL) { dev->dev_private = (void *)dev_priv; i810_dma_cleanup(dev); @@ -451,7 +454,7 @@ memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); - I810_WRITE(0x02080, dev_priv->dma_status_page); + I810_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); /* Now we need to init our freelist */ @@ -532,16 +535,12 @@ /* Most efficient way to verify state for the i810 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones. 
*/ static void i810EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_CTX_SETUP_SIZE ); @@ -553,14 +552,13 @@ OUT_RING( code[I810_CTXREG_ST1] ); for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("constext state dropped!!!\n"); } if (j & 1) @@ -574,7 +572,6 @@ { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; - unsigned int tmp; RING_LOCALS; BEGIN_LP_RING( I810_TEX_SETUP_SIZE ); @@ -585,14 +582,14 @@ OUT_RING( code[I810_TEXREG_MI3] ); for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - if ((tmp & (7<<29)) == (3<<29) && - (tmp & (0x1f<<24)) < (0x1d<<24)) + if ((code[i] & (7<<29)) == (3<<29) && + (code[i] & (0x1f<<24)) < (0x1d<<24)) { - OUT_RING( tmp ); + OUT_RING( code[i] ); j++; } + else printk("texture state dropped!!!\n"); } if (j & 1) @@ -617,9 +614,9 @@ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( tmp ); - } else - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", - tmp, dev_priv->front_di1, dev_priv->back_di1); + } + else + printk("buffer state dropped\n"); /* invarient: */ @@ -704,7 +701,6 @@ continue; if ( flags & I810_FRONT ) { - DRM_DEBUG("clear front\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -717,7 +713,6 @@ } if ( flags & I810_BACK ) { - DRM_DEBUG("clear back\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -730,7 +725,6 @@ } if ( flags & I810_DEPTH ) { - DRM_DEBUG("clear depth\n"); BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3 ); @@ -756,8 +750,6 @@ int i; RING_LOCALS; - DRM_DEBUG("swapbuffers\n"); - i810_kernel_lost_context(dev); if (nbox > I810_NR_SAREA_CLIPRECTS) @@ -776,10 +768,6 @@ pbox->y2 > dev_priv->h) continue; - DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n", - pbox[i].x1, pbox[i].y1, - pbox[i].x2, pbox[i].y2); - BEGIN_LP_RING( 6 ); OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 ); OUT_RING( pitch | (0xCC << 16)); @@ -804,7 +792,7 @@ int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; - int i = 0, u; + int i = 0; RING_LOCALS; i810_kernel_lost_context(dev); @@ -812,33 +800,16 @@ if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (discard) { - u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, - I810_BUF_HARDWARE); - if(u != I810_BUF_CLIENT) { - DRM_DEBUG("xxxx 2\n"); - } - } - if (used > 4*1024) used = 0; if (sarea_priv->dirty) i810EmitState( dev ); - DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n", - address, used, nbox); - - dev_priv->counter++; - DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter); - DRM_DEBUG( "i810_dma_dispatch\n"); - DRM_DEBUG( "start : %lx\n", start); - DRM_DEBUG( "used : %d\n", used); - DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); - if (buf_priv->currently_mapped == I810_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | + unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); + + *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | prim | ((used/4)-2)); if (used & 4) { @@ -871,154 +842,62 @@ } 
while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, + I810_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I810_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - - -/* Interrupts are only for flushing */ -void i810_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u16 temp; - - atomic_inc(&dev->counts[_DRM_STAT_IRQ]); - temp = I810_READ16(I810REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); } -void i810_dma_immediate_bh(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i810_dma_emit_flush(drm_device_t *dev) -{ - drm_i810_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - - i810_kernel_lost_context(dev); - - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -static inline void i810_dma_quiescent_emit(drm_device_t *dev) +void i810_dma_quiescent(drm_device_t *dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; +/* printk("%s\n", __FUNCTION__); */ + i810_kernel_lost_context(dev); BEGIN_LP_RING(4); OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT ); + OUT_RING( 0 ); ADVANCE_LP_RING(); -/* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */ -/* atomic_set(&dev_priv->flush_done, 1); */ -/* wake_up_interruptible(&dev_priv->flush_queue); */ -} - -void i810_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); } static int i810_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; + drm_i810_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; int i, ret = 0; + RING_LOCALS; + +/* printk("%s\n", __FUNCTION__); 
*/ - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i810_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if((signed)(end - jiffies) <= 0) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } + i810_kernel_lost_context(dev); - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i810_wait_ring( dev, dev_priv->ring.Size - 8 ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1030,7 +909,7 @@ if (used == I810_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I810_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1070,7 +949,6 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_flush_ioctl\n"); if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1101,9 +979,6 @@ return -EINVAL; } - DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", - vertex.idx, vertex.used, vertex.discard); - if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex( dev, @@ -1152,8 +1027,6 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - DRM_DEBUG("i810_swap_bufs\n"); - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i810_swap_buf called without lock held\n"); return -EINVAL; @@ -1189,7 +1062,6 @@ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - DRM_DEBUG("getbuf\n"); if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d))) return -EFAULT; @@ -1202,9 +1074,6 @@ retcode = i810_dma_get_buffer(dev, &d, filp); - DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int) hw_status[5]; @@ -1212,47 +1081,19 @@ return retcode; } -int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int i810_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_i810_copy_t d; - drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i810_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; - - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i810_dma called without lock held\n"); - return -EINVAL; - } - - if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - - if (copy_from_user(buf_priv->virtual, d.address, d.used)) - return -EFAULT; - - sarea_priv->last_dispatch = (int) hw_status[5]; - + /* Never copy - 2.4.x doesn't need it */ return 0; } int i810_docopy(struct inode *inode, 
struct file *filp, unsigned int cmd, unsigned long arg) { - if(VM_DONTCOPY == 0) return 1; + /* Never copy - 2.4.x doesn't need it */ return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i810_drm.h linux.21pre4-ac2/drivers/char/drm/i810_drm.h --- linux.21pre4/drivers/char/drm/i810_drm.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i810_drm.h 2003-01-06 17:27:11.000000000 +0000 @@ -88,6 +88,8 @@ #define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */ #define I810_TEX_SETUP_SIZE 8 +/* Flags for clear ioctl + */ #define I810_FRONT 0x1 #define I810_BACK 0x2 #define I810_DEPTH 0x4 @@ -166,14 +168,34 @@ } drm_i810_sarea_t; +/* WARNING: If you change any of these defines, make sure to wear a bullet + * proof vest since these are part of the stable kernel<->userspace ABI + */ + +/* i810 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t) +#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t) +#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t) +#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43) +#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44) +#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t) +#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46) +#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t) +#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48) +#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t) +#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a) +#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b) +#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t) +#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d ) + typedef struct _drm_i810_clear { int clear_color; int clear_depth; int flags; } drm_i810_clear_t; - - /* These may be placeholders if we have more cliprects than * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to * false, indicating that the buffer will be dispatched again with a @@ -191,6 +213,17 @@ void *address; /* Address to copy from */ } drm_i810_copy_t; +#define PR_TRIANGLES (0x0<<18) +#define PR_TRISTRIP_0 (0x1<<18) +#define PR_TRISTRIP_1 (0x2<<18) +#define PR_TRIFAN (0x3<<18) +#define PR_POLYGON (0x4<<18) +#define PR_LINES (0x5<<18) +#define PR_LINESTRIP (0x6<<18) +#define PR_RECTS (0x7<<18) +#define PR_MASK (0x7<<18) + + typedef struct drm_i810_dma { void *virtual; int request_idx; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i810_drv.c linux.21pre4-ac2/drivers/char/drm/i810_drv.c --- linux.21pre4/drivers/char/drm/i810_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i810_drv.c 2003-01-06 17:27:11.000000000 +0000 @@ -33,42 +33,10 @@ #include #include "i810.h" #include "drmP.h" +#include "drm.h" +#include "i810_drm.h" #include "i810_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." 
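The i810 vertex dispatch above hands a DMA buffer from the client to the hardware with a single cmpxchg on its in_use word (I810_BUF_CLIENT -> I810_BUF_HARDWARE), so the ring command that later stores I810_BUF_FREE never races a stale client state. A minimal user-space sketch of that hand-off, using GCC's __sync_val_compare_and_swap and invented state names that merely mirror the driver's constants:

#include <stdio.h>

enum { BUF_FREE = 2, BUF_CLIENT = 1, BUF_HARDWARE = 0 };

/* Try to move a buffer from CLIENT to HARDWARE ownership.  Returns the
 * value that was actually observed, the same contract as cmpxchg(). */
static int hand_to_hardware(int *in_use)
{
	return __sync_val_compare_and_swap(in_use, BUF_CLIENT, BUF_HARDWARE);
}

int main(void)
{
	int in_use = BUF_CLIENT;
	int old = hand_to_hardware(&in_use);

	/* old == BUF_CLIENT means the swap happened; from here on only the
	 * command stream's later write of BUF_FREE touches the word. */
	printf("was %d, now %d\n", old, in_use);
	return 0;
}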
- -#define DRIVER_NAME "i810" -#define DRIVER_DESC "Intel i810" -#define DRIVER_DATE "20010920" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 } - - -#define __HAVE_COUNTERS 4 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#define __HAVE_COUNTER9 _DRM_STAT_DMA - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -77,25 +45,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init i810_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", i810_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i810_drv.h linux.21pre4-ac2/drivers/char/drm/i810_drv.h --- linux.21pre4/drivers/char/drm/i810_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i810_drv.h 2003-01-06 17:27:11.000000000 +0000 @@ -63,10 +63,9 @@ unsigned long hw_status_page; unsigned long counter; - dma_addr_t dma_status_page; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ + dma_addr_t dma_status_page; + drm_buf_t *mmap_buffer; @@ -78,6 +77,7 @@ int overlay_physical; int w, h; int pitch; + } drm_i810_private_t; /* i810_dma.c */ @@ -92,8 +92,13 @@ extern int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma); + +/* Obsolete: + */ extern int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +/* Obsolete: + */ extern int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -111,9 +116,6 @@ extern void i810_dma_quiescent(drm_device_t *dev); -#define I810_VERBOSE 0 - - int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i810.h linux.21pre4-ac2/drivers/char/drm/i810.h --- linux.21pre4/drivers/char/drm/i810.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i810.h 2003-01-06 17:27:11.000000000 +0000 @@ -41,6 +41,47 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." 
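The DRIVER_IOCTLS block removed from i810_drv.c above reappears in i810.h further down; the shared drm_drv.h template expands it into a handler table indexed by ioctl number. A rough stand-alone sketch of that table-driven dispatch, with invented names (my_ioctl_desc, NR(), the handlers) rather than the real DRM macros:

#include <stdio.h>

#define NR(n) ((n) & 0xff)		/* low byte selects the table slot */

struct my_ioctl_desc {
	int (*func)(unsigned long arg);
	int auth_needed;		/* mirrors the first flag column */
	int root_only;			/* mirrors the second flag column */
};

static int do_init(unsigned long arg)  { printf("init %lu\n", arg);  return 0; }
static int do_flush(unsigned long arg) { printf("flush %lu\n", arg); return 0; }

/* Designated initializers keep the table sparse and self-documenting,
 * like the [DRM_IOCTL_NR(...)] = { handler, 1, 0 } rows in the patch. */
static struct my_ioctl_desc ioctls[] = {
	[NR(0x40)] = { do_init,  1, 1 },
	[NR(0x43)] = { do_flush, 1, 0 },
};

static int dispatch(unsigned int cmd, unsigned long arg)
{
	unsigned int nr = NR(cmd);

	if (nr >= sizeof(ioctls) / sizeof(ioctls[0]) || !ioctls[nr].func)
		return -1;		/* -EINVAL in the kernel version */
	return ioctls[nr].func(arg);
}

int main(void)
{
	dispatch(0x40, 7);
	dispatch(0x43, 0);
	return 0;
}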
+ +#define DRIVER_NAME "i810" +#define DRIVER_DESC "Intel i810" +#define DRIVER_DATE "20020211" + +/* Interface history + * + * 1.1 - XFree86 4.1 + * 1.2 - XvMC interfaces + * - XFree86 4.2 + * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) + * - Remove requirement for interrupt (leave stubs again) + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 1 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)] = { i810_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)] = { i810_clear_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)] = { i810_flush_ioctl, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 } + + +#define __HAVE_COUNTERS 4 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY +#define __HAVE_COUNTER9 _DRM_STAT_DMA + /* Driver customization: */ #define __HAVE_RELEASE 1 @@ -60,50 +101,10 @@ i810_dma_quiescent( dev ); \ } while (0) -#define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I810_WRITE16( I810REG_HWSTAM, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I810_WRITE16( I810REG_INT_MASK_R, tmp ); \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_POSTINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ -} while (0) - -#define DRIVER_UNINSTALL() do { \ - drm_i810_private_t *dev_priv = \ - (drm_i810_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I810_READ16( I810REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I810_WRITE16( I810REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I810_READ16( I810REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I810_WRITE16( I810REG_INT_ENABLE_R, tmp ); \ - } \ -} while (0) +/* Don't need an irq any more. The template code will make sure that + * a noop stub is generated for compatibility. 
+ */ +#define __HAVE_DMA_IRQ 0 /* Buffer customization: */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i830_dma.c linux.21pre4-ac2/drivers/char/drm/i830_dma.c --- linux.21pre4/drivers/char/drm/i830_dma.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i830_dma.c 2003-01-31 11:54:14.000000000 +0000 @@ -26,20 +26,22 @@ * * Authors: Rickard E. (Rik) Faith * Jeff Hartmann - * Keith Whitwell - * Abraham vd Merwe + * Keith Whitwell + * Abraham vd Merwe * */ + #include "i830.h" #include "drmP.h" +#include "drm.h" +#include "i830_drm.h" #include "i830_drv.h" #include /* For task queue support */ +#include /* For FASTCALL on unlock_page() */ #include -/* in case we don't have a 2.3.99-pre6 kernel or later: */ -#ifndef VM_DONTCOPY -#define VM_DONTCOPY 0 -#endif + +#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) #define I830_BUF_FREE 2 #define I830_BUF_CLIENT 1 @@ -48,54 +50,24 @@ #define I830_BUF_UNMAPPED 0 #define I830_BUF_MAPPED 1 -#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt; -#define DO_IDLE_WORKAROUND() \ -do { \ - int _head; \ - int _tail; \ - do { \ - _head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; \ - _tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; \ - udelay(1); \ - } while(_head != _tail); \ -} while(0) - -#define I830_SYNC_WORKAROUND 0 - -#define BEGIN_LP_RING(n) do { \ - if (I830_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ - n, __FUNCTION__); \ - if (I830_SYNC_WORKAROUND) \ - DO_IDLE_WORKAROUND(); \ - if (dev_priv->ring.space < n*4) \ - i830_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ -} while (0) - -#define ADVANCE_LP_RING() do { \ - if (I830_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ - I830_WRITE(LP_RING + RING_TAIL, outring); \ -} while(0) - -#define OUT_RING(n) do { \ - if (I830_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ -} while (0); + + + + + + + + + + static inline void i830_print_status_page(drm_device_t *dev) { drm_device_dma_t *dma = dev->dma; drm_i830_private_t *dev_priv = dev->dev_private; - u8 *temp = dev_priv->hw_status_page; + u32 *temp = (u32 *)dev_priv->hw_status_page; int i; DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]); @@ -149,14 +121,14 @@ } static struct file_operations i830_buffer_fops = { - open: DRM(open), - flush: DRM(flush), - release: DRM(release), - ioctl: DRM(ioctl), - mmap: i830_mmap_buffers, - read: DRM(read), - fasync: DRM(fasync), - poll: DRM(poll), + .open = DRM(open), + .flush = DRM(flush), + .release = DRM(release), + .ioctl = DRM(ioctl), + .mmap = i830_mmap_buffers, + .read = DRM(read), + .fasync = DRM(fasync), + .poll = DRM(poll), }; int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) @@ -179,7 +151,7 @@ buf_priv->currently_mapped = I830_BUF_MAPPED; unlock_kernel(); - if (remap_page_range(vma->vm_start, + if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start, VM_OFFSET(vma), vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; @@ -197,28 +169,24 @@ if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL; - if(VM_DONTCOPY != 0) { - down_write( ¤t->mm->mmap_sem ); - old_fops = filp->f_op; - filp->f_op = &i830_buffer_fops; - dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, - 
PROT_READ|PROT_WRITE, - MAP_SHARED, - buf->bus_address); - dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; - if ((unsigned long)buf_priv->virtual > -1024UL) { - /* Real error */ - DRM_DEBUG("mmap error\n"); - retcode = (signed int)buf_priv->virtual; - buf_priv->virtual = 0; - } - up_write( ¤t->mm->mmap_sem ); - } else { - buf_priv->virtual = buf_priv->kernel_virtual; - buf_priv->currently_mapped = I830_BUF_MAPPED; + down_write( ¤t->mm->mmap_sem ); + old_fops = filp->f_op; + filp->f_op = &i830_buffer_fops; + dev_priv->mmap_buffer = buf; + buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + PROT_READ|PROT_WRITE, + MAP_SHARED, + buf->bus_address); + dev_priv->mmap_buffer = NULL; + filp->f_op = old_fops; + if ((unsigned long)buf_priv->virtual > -1024UL) { + /* Real error */ + DRM_ERROR("mmap error\n"); + retcode = (signed int)buf_priv->virtual; + buf_priv->virtual = 0; } + up_write( ¤t->mm->mmap_sem ); + return retcode; } @@ -227,15 +195,15 @@ drm_i830_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; - if(VM_DONTCOPY != 0) { - if(buf_priv->currently_mapped != I830_BUF_MAPPED) - return -EINVAL; - down_write( ¤t->mm->mmap_sem ); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, - (size_t) buf->total); - up_write( ¤t->mm->mmap_sem ); - } + if(buf_priv->currently_mapped != I830_BUF_MAPPED) + return -EINVAL; + + down_write(¤t->mm->mmap_sem); + retcode = DO_MUNMAP(current->mm, + (unsigned long)buf_priv->virtual, + (size_t) buf->total); + up_write(¤t->mm->mmap_sem); + buf_priv->currently_mapped = I830_BUF_UNMAPPED; buf_priv->virtual = 0; @@ -260,7 +228,7 @@ retcode = i830_map_buffer(buf, filp); if(retcode) { i830_freelist_put(dev, buf); - DRM_DEBUG("mapbuf failed, retcode %d\n", retcode); + DRM_ERROR("mapbuf failed, retcode %d\n", retcode); return retcode; } buf->pid = priv->pid; @@ -286,12 +254,22 @@ DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, dev_priv->ring.Size); } - if(dev_priv->hw_status_page != NULL) { - pci_free_consistent(dev->pdev, PAGE_SIZE, - dev_priv->hw_status_page, dev_priv->dma_status_page); + if(dev_priv->hw_status_page != 0UL) { + pci_free_consistent(dev->pdev, PAGE_SIZE, + (void *)dev_priv->hw_status_page, + dev_priv->dma_status_page); /* Need to rewrite hardware status page */ I830_WRITE(0x02080, 0x1ffff000); } + + /* Disable interrupts here because after dev_private + * is freed, it's too late. 
+ */ + if (dev->irq) { + I830_WRITE16( I830REG_INT_MASK_R, 0xffff ); + I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 ); + } + DRM(free)(dev->dev_private, sizeof(drm_i830_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; @@ -305,7 +283,7 @@ return 0; } -static int i830_wait_ring(drm_device_t *dev, int n) +int i830_wait_ring(drm_device_t *dev, int n, const char *caller) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_ring_buffer_t *ring = &(dev_priv->ring); @@ -314,7 +292,7 @@ unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; end = jiffies + (HZ*3); - while (ring->space < n) { + while (ring->space < n) { ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; @@ -325,13 +303,13 @@ } iters++; - if(time_before(end,jiffies)) { + if(time_before(end, jiffies)) { DRM_ERROR("space: %d wanted %d\n", ring->space, n); DRM_ERROR("lockup\n"); goto out_wait_ring; } - - udelay(1); + udelay(1); + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; } out_wait_ring: @@ -344,9 +322,12 @@ drm_i830_ring_buffer_t *ring = &(dev_priv->ring); ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; - ring->tail = I830_READ(LP_RING + RING_TAIL); + ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; ring->space = ring->head - (ring->tail+8); if (ring->space < 0) ring->space += ring->Size; + + if (ring->head == ring->tail) + dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; } static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv) @@ -420,9 +401,6 @@ ((u8 *)dev_priv->sarea_map->handle + init->sarea_priv_offset); - atomic_set(&dev_priv->flush_done, 0); - init_waitqueue_head(&dev_priv->flush_queue); - dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; @@ -446,11 +424,17 @@ dev_priv->pitch = init->pitch; dev_priv->back_offset = init->back_offset; dev_priv->depth_offset = init->depth_offset; + dev_priv->front_offset = init->front_offset; dev_priv->front_di1 = init->front_offset | init->pitch_bits; dev_priv->back_di1 = init->back_offset | init->pitch_bits; dev_priv->zi1 = init->depth_offset | init->pitch_bits; + DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1); + DRM_DEBUG("back_offset %x\n", dev_priv->back_offset); + DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1); + DRM_DEBUG("pitch_bits %x\n", init->pitch_bits); + dev_priv->cpp = init->cpp; /* We are using seperate values as placeholders for mechanisms for * private backbuffer/depthbuffer usage. 
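The reworked i830_wait_ring above recomputes free ring space from the hardware head pointer and the driver's tail, handling wrap-around, and gives up after roughly three seconds' worth of jiffies. A self-contained sketch of just that arithmetic and timeout shape, in plain C with a fake read_head() standing in for the LP_RING register read:

#include <stdio.h>

#define RING_SIZE	(64 * 1024)

/* Stand-in for I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR. */
static unsigned int read_head(void)
{
	static unsigned int head;
	head = (head + 256) % RING_SIZE;	/* pretend the card makes progress */
	return head;
}

/* Free bytes between tail and head in a circular ring; the extra 8 keeps
 * the tail from ever catching the head exactly, as in the driver. */
static int ring_space(unsigned int head, unsigned int tail)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += RING_SIZE;
	return space;
}

static int wait_ring(unsigned int tail, int needed)
{
	long budget = 1000000;			/* stands in for jiffies + 3*HZ */

	while (ring_space(read_head(), tail) < needed) {
		if (--budget <= 0) {
			fprintf(stderr, "lockup\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return wait_ring(0, 4 * 1024) ? 1 : 0;
}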
@@ -458,20 +442,23 @@ dev_priv->back_pitch = init->back_pitch; dev_priv->depth_pitch = init->depth_pitch; + dev_priv->do_boxes = 0; + dev_priv->use_mi_batchbuffer_start = 0; /* Program Hardware Status Page */ - dev_priv->hw_status_page = pci_alloc_consistent(dev->pdev, PAGE_SIZE, + dev_priv->hw_status_page = + (unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE, &dev_priv->dma_status_page); - if(dev_priv->hw_status_page == NULL) { + if(dev_priv->hw_status_page == 0UL) { dev->dev_private = (void *)dev_priv; i830_dma_cleanup(dev); DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); + memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE); + DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page); - I830_WRITE(0x02080, dev_priv->dma_status_page); + I830_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); /* Now we need to init our freelist */ @@ -517,83 +504,107 @@ return retcode; } +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define ST1_ENABLE (1<<16) +#define ST1_MASK (0xffff) + /* Most efficient way to verify state for the i830 is as it is * emitted. Non-conformant state is silently dropped. - * - * Use 'volatile' & local var tmp to force the emitted values to be - * identical to the verified ones. */ -static void i830EmitContextVerified( drm_device_t *dev, - volatile unsigned int *code ) +static void i830EmitContextVerified( drm_device_t *dev, + unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_CTX_SETUP_SIZE ); - for ( i = 0 ; i < I830_CTX_SETUP_SIZE ; i++ ) { + BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 ); + + for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) { tmp = code[i]; + if ((tmp & (7<<29)) == CMD_3D && + (tmp & (0x1f<<24)) < (0x1d<<24)) { + OUT_RING( tmp ); + j++; + } else { + DRM_ERROR("Skipping %d\n", i); + } + } -#if 0 - if ((tmp & (7<<29)) == (3<<29) && + OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD ); + OUT_RING( code[I830_CTXREG_BLENDCOLR] ); + j += 2; + + for ( i = I830_CTXREG_VF ; i < I830_CTXREG_MCSB0 ; i++ ) { + tmp = code[i]; + if ((tmp & (7<<29)) == CMD_3D && (tmp & (0x1f<<24)) < (0x1d<<24)) { OUT_RING( tmp ); j++; } else { - printk("Skipping %d\n", i); + DRM_ERROR("Skipping %d\n", i); } -#else - OUT_RING( tmp ); - j++; -#endif } + OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD ); + OUT_RING( code[I830_CTXREG_MCSB1] ); + j += 2; + if (j & 1) OUT_RING( 0 ); ADVANCE_LP_RING(); } -static void i830EmitTexVerified( drm_device_t *dev, - volatile unsigned int *code ) +static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); - - OUT_RING( GFX_OP_MAP_INFO ); - OUT_RING( code[I830_TEXREG_MI1] ); - OUT_RING( code[I830_TEXREG_MI2] ); - OUT_RING( code[I830_TEXREG_MI3] ); - OUT_RING( code[I830_TEXREG_MI4] ); - OUT_RING( code[I830_TEXREG_MI5] ); - - for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { - tmp = code[i]; - OUT_RING( tmp ); - j++; - } + if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO || + (code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) == + (STATE3D_LOAD_STATE_IMMEDIATE_2|4)) { + + BEGIN_LP_RING( I830_TEX_SETUP_SIZE ); + + OUT_RING( code[I830_TEXREG_MI0] ); /* TM0LI */ + OUT_RING( code[I830_TEXREG_MI1] ); /* TM0S0 */ + OUT_RING( code[I830_TEXREG_MI2] ); /* 
TM0S1 */ + OUT_RING( code[I830_TEXREG_MI3] ); /* TM0S2 */ + OUT_RING( code[I830_TEXREG_MI4] ); /* TM0S3 */ + OUT_RING( code[I830_TEXREG_MI5] ); /* TM0S4 */ + + for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) { + tmp = code[i]; + OUT_RING( tmp ); + j++; + } - if (j & 1) - OUT_RING( 0 ); + if (j & 1) + OUT_RING( 0 ); - ADVANCE_LP_RING(); + ADVANCE_LP_RING(); + } + else + printk("rejected packet %x\n", code[0]); } static void i830EmitTexBlendVerified( drm_device_t *dev, - volatile unsigned int *code, - volatile unsigned int num) + unsigned int *code, + unsigned int num) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( num ); + if (!num) + return; + + BEGIN_LP_RING( num + 1 ); for ( i = 0 ; i < num ; i++ ) { tmp = code[i]; @@ -616,6 +627,8 @@ int i; RING_LOCALS; + return; /* Is this right ? -- Arjan */ + BEGIN_LP_RING( 258 ); if(is_shared == 1) { @@ -629,44 +642,43 @@ OUT_RING(palette[i]); } OUT_RING(0); + /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! + */ } /* Need to do some additional checking when setting the dest buffer. */ static void i830EmitDestVerified( drm_device_t *dev, - volatile unsigned int *code ) + unsigned int *code ) { drm_i830_private_t *dev_priv = dev->dev_private; unsigned int tmp; RING_LOCALS; - BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 6 ); + BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 ); + tmp = code[I830_DESTREG_CBUFADDR]; - if (tmp == dev_priv->front_di1) { - /* Don't use fence when front buffer rendering */ - OUT_RING( CMD_OP_DESTBUFFER_INFO ); - OUT_RING( BUF_3D_ID_COLOR_BACK | - BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) ); - OUT_RING( tmp ); + if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { + if (((int)outring) & 8) { + OUT_RING(0); + OUT_RING(0); + } OUT_RING( CMD_OP_DESTBUFFER_INFO ); - OUT_RING( BUF_3D_ID_DEPTH | - BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); - OUT_RING( dev_priv->zi1 ); - } else if(tmp == dev_priv->back_di1) { - OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) | BUF_3D_USE_FENCE); OUT_RING( tmp ); + OUT_RING( 0 ); OUT_RING( CMD_OP_DESTBUFFER_INFO ); OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE | BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp)); OUT_RING( dev_priv->zi1 ); + OUT_RING( 0 ); } else { - DRM_DEBUG("bad di1 %x (allow %x or %x)\n", + DRM_ERROR("bad di1 %x (allow %x or %x)\n", tmp, dev_priv->front_di1, dev_priv->back_di1); } @@ -688,25 +700,39 @@ if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) { OUT_RING( tmp ); } else { - DRM_DEBUG("bad scissor enable\n"); + DRM_ERROR("bad scissor enable\n"); OUT_RING( 0 ); } - OUT_RING( code[I830_DESTREG_SENABLE] ); - OUT_RING( GFX_OP_SCISSOR_RECT ); OUT_RING( code[I830_DESTREG_SR1] ); OUT_RING( code[I830_DESTREG_SR2] ); + OUT_RING( 0 ); ADVANCE_LP_RING(); } +static void i830EmitStippleVerified( drm_device_t *dev, + unsigned int *code ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + BEGIN_LP_RING( 2 ); + OUT_RING( GFX_OP_STIPPLE ); + OUT_RING( code[1] ); + ADVANCE_LP_RING(); +} + + static void i830EmitState( drm_device_t *dev ) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; + DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); + if (dirty & I830_UPLOAD_BUFFERS) { i830EmitDestVerified( dev, sarea_priv->BufferState ); sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS; @@ -740,17 +766,154 @@ } if (dirty & 
I830_UPLOAD_TEX_PALETTE_SHARED) { - i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1); + } else { + if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { + i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { + i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); + } + + /* 1.3: + */ +#if 0 + if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } + if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) { + i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0); + sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2); + } +#endif + } + + /* 1.3: + */ + if (dirty & I830_UPLOAD_STIPPLE) { + i830EmitStippleVerified( dev, + sarea_priv->StippleState); + sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE; + } + + if (dirty & I830_UPLOAD_TEX2) { + i830EmitTexVerified( dev, sarea_priv->TexState2 ); + sarea_priv->dirty &= ~I830_UPLOAD_TEX2; + } + + if (dirty & I830_UPLOAD_TEX3) { + i830EmitTexVerified( dev, sarea_priv->TexState3 ); + sarea_priv->dirty &= ~I830_UPLOAD_TEX3; + } + + + if (dirty & I830_UPLOAD_TEXBLEND2) { + i830EmitTexBlendVerified( + dev, + sarea_priv->TexBlendState2, + sarea_priv->TexBlendStateWordsUsed2); + + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2; + } + + if (dirty & I830_UPLOAD_TEXBLEND3) { + i830EmitTexBlendVerified( + dev, + sarea_priv->TexBlendState3, + sarea_priv->TexBlendStateWordsUsed3); + sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3; + } +} + +/* ================================================================ + * Performance monitoring functions + */ + +static void i830_fill_box( drm_device_t *dev, + int x, int y, int w, int h, + int r, int g, int b ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + u32 color; + unsigned int BR13, CMD; + RING_LOCALS; + + BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24); + CMD = XY_COLOR_BLT_CMD; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + + if (dev_priv->cpp == 4) { + BR13 |= (1<<25); + CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB); + color = (((0xff) << 24) | (r << 16) | (g << 8) | b); } else { - if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) { - i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0); - sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0); - } - if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) { - i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0); - sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1); - } + color = (((r & 0xf8) << 8) | + ((g & 0xfc) << 3) | + ((b & 0xf8) >> 3)); + } + + BEGIN_LP_RING( 6 ); + OUT_RING( CMD ); + OUT_RING( BR13 ); + OUT_RING( (y << 16) | x ); + OUT_RING( ((y+h) << 16) | (x+w) ); + + if ( dev_priv->current_page == 1 ) { + OUT_RING( dev_priv->front_offset ); + } else { + OUT_RING( dev_priv->back_offset ); + } + + OUT_RING( color ); + ADVANCE_LP_RING(); +} + +static void i830_cp_performance_boxes( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + /* Purple box for page flipping + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP ) + i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 ); + + /* Red box if we have to wait for idle at any point + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT ) + i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 ); + + /* Blue box: lost context? 
+ */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT ) + i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 ); + + /* Yellow box for texture swaps + */ + if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD ) + i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 ); + + /* Green box if hardware never idles (as far as we can tell) + */ + if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) ) + i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 ); + + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->dma_used) { + int bar = dev_priv->dma_used / 10240; + if (bar > 100) bar = 100; + if (bar < 1) bar = 1; + i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 ); + dev_priv->dma_used = 0; } + + dev_priv->sarea_priv->perf_boxes = 0; } static void i830_dma_dispatch_clear( drm_device_t *dev, int flags, @@ -768,6 +931,15 @@ unsigned int BR13, CMD, D_CMD; RING_LOCALS; + + if ( dev_priv->current_page == 1 ) { + unsigned int tmp = flags; + + flags &= ~(I830_FRONT | I830_BACK); + if ( tmp & I830_FRONT ) flags |= I830_BACK; + if ( tmp & I830_BACK ) flags |= I830_FRONT; + } + i830_kernel_lost_context(dev); switch(cpp) { @@ -808,7 +980,7 @@ OUT_RING( BR13 ); OUT_RING( (pbox->y1 << 16) | pbox->x1 ); OUT_RING( (pbox->y2 << 16) | pbox->x2 ); - OUT_RING( 0 ); + OUT_RING( dev_priv->front_offset ); OUT_RING( clear_color ); ADVANCE_LP_RING(); } @@ -847,13 +1019,17 @@ drm_clip_rect_t *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = dev_priv->cpp; - int ofs = dev_priv->back_offset; int i; unsigned int CMD, BR13; RING_LOCALS; DRM_DEBUG("swapbuffers\n"); + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) + i830_cp_performance_boxes( dev ); + switch(cpp) { case 2: BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24); @@ -870,7 +1046,6 @@ break; } - i830_kernel_lost_context(dev); if (nbox > I830_NR_SAREA_CLIPRECTS) nbox = I830_NR_SAREA_CLIPRECTS; @@ -890,23 +1065,72 @@ BEGIN_LP_RING( 8 ); OUT_RING( CMD ); OUT_RING( BR13 ); + OUT_RING( (pbox->y1 << 16) | pbox->x1 ); + OUT_RING( (pbox->y2 << 16) | pbox->x2 ); - OUT_RING( (pbox->y1 << 16) | - pbox->x1 ); - OUT_RING( (pbox->y2 << 16) | - pbox->x2 ); - - OUT_RING( 0 /* front ofs always zero */ ); - OUT_RING( (pbox->y1 << 16) | - pbox->x1 ); + if (dev_priv->current_page == 0) + OUT_RING( dev_priv->front_offset ); + else + OUT_RING( dev_priv->back_offset ); + OUT_RING( (pbox->y1 << 16) | pbox->x1 ); OUT_RING( BR13 & 0xffff ); - OUT_RING( ofs ); + + if (dev_priv->current_page == 0) + OUT_RING( dev_priv->back_offset ); + else + OUT_RING( dev_priv->front_offset ); ADVANCE_LP_RING(); } } +static void i830_dma_dispatch_flip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pf_current_page); + + i830_kernel_lost_context(dev); + + if (dev_priv->do_boxes) { + dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP; + i830_cp_performance_boxes( dev ); + } + + + BEGIN_LP_RING( 2 ); + OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + BEGIN_LP_RING( 6 ); + OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP ); + OUT_RING( 0 ); + if ( dev_priv->current_page == 0 ) { + OUT_RING( dev_priv->back_offset ); + dev_priv->current_page = 1; + } else { + OUT_RING( dev_priv->front_offset ); + dev_priv->current_page = 0; + } + OUT_RING(0); + ADVANCE_LP_RING(); + + + BEGIN_LP_RING( 2 ); + OUT_RING( 
MI_WAIT_FOR_EVENT | + MI_WAIT_FOR_PLANE_A_FLIP ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + + + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} static void i830_dma_dispatch_vertex(drm_device_t *dev, drm_buf_t *buf, @@ -936,7 +1160,7 @@ } } - if (used > 4*1024) + if (used > 4*1023) used = 0; if (sarea_priv->dirty) @@ -953,12 +1177,19 @@ DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4); if (buf_priv->currently_mapped == I830_BUF_MAPPED) { - *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | - sarea_priv->vertex_prim | - ((used/4)-2)); + u32 *vp = buf_priv->virtual; + + vp[0] = (GFX_OP_PRIMITIVE | + sarea_priv->vertex_prim | + ((used/4)-2)); + + if (dev_priv->use_mi_batchbuffer_start) { + vp[used/4] = MI_BATCH_BUFFER_END; + used += 4; + } if (used & 4) { - *(u32 *)((u32)buf_priv->virtual + used) = 0; + vp[used/4] = 0; used += 4; } @@ -978,80 +1209,45 @@ ADVANCE_LP_RING(); } - BEGIN_LP_RING(4); + if (dev_priv->use_mi_batchbuffer_start) { + BEGIN_LP_RING(2); + OUT_RING( MI_BATCH_BUFFER_START | (2<<6) ); + OUT_RING( start | MI_BATCH_NON_SECURE ); + ADVANCE_LP_RING(); + } + else { + BEGIN_LP_RING(4); + OUT_RING( MI_BATCH_BUFFER ); + OUT_RING( start | MI_BATCH_NON_SECURE ); + OUT_RING( start + used - 4 ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + } - OUT_RING( MI_BATCH_BUFFER ); - OUT_RING( start | MI_BATCH_NON_SECURE ); - OUT_RING( start + used - 4 ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); - } while (++i < nbox); } - BEGIN_LP_RING(10); - OUT_RING( CMD_STORE_DWORD_IDX ); - OUT_RING( 20 ); - OUT_RING( dev_priv->counter ); - OUT_RING( 0 ); - if (discard) { + dev_priv->counter++; + + (void) cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, + I830_BUF_HARDWARE); + + BEGIN_LP_RING(8); + OUT_RING( CMD_STORE_DWORD_IDX ); + OUT_RING( 20 ); + OUT_RING( dev_priv->counter ); OUT_RING( CMD_STORE_DWORD_IDX ); OUT_RING( buf_priv->my_use_idx ); OUT_RING( I830_BUF_FREE ); + OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); + ADVANCE_LP_RING(); } - - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( 0 ); - ADVANCE_LP_RING(); -} - -/* Interrupts are only for flushing */ -void i830_dma_service(int irq, void *device, struct pt_regs *regs) -{ - drm_device_t *dev = (drm_device_t *)device; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - u16 temp; - - temp = I830_READ16(I830REG_INT_IDENTITY_R); - temp = temp & ~(0x6000); - if(temp != 0) I830_WRITE16(I830REG_INT_IDENTITY_R, - temp); /* Clear all interrupts */ - else - return; - - queue_task(&dev->tq, &tq_immediate); - mark_bh(IMMEDIATE_BH); -} - -void DRM(dma_immediate_bh)(void *device) -{ - drm_device_t *dev = (drm_device_t *) device; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); } -static inline void i830_dma_emit_flush(drm_device_t *dev) -{ - drm_i830_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - - i830_kernel_lost_context(dev); - BEGIN_LP_RING(2); - OUT_RING( CMD_REPORT_HEAD ); - OUT_RING( GFX_OP_USER_INTERRUPT ); - ADVANCE_LP_RING(); - - i830_wait_ring( dev, dev_priv->ring.Size - 8 ); - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -static inline void i830_dma_quiescent_emit(drm_device_t *dev) +void i830_dma_quiescent(drm_device_t *dev) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1062,79 +1258,27 @@ OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE ); OUT_RING( CMD_REPORT_HEAD ); OUT_RING( 0 ); - OUT_RING( GFX_OP_USER_INTERRUPT 
); + OUT_RING( 0 ); ADVANCE_LP_RING(); - i830_wait_ring( dev, dev_priv->ring.Size - 8 ); - atomic_set(&dev_priv->flush_done, 1); - wake_up_interruptible(&dev_priv->flush_queue); -} - -void i830_dma_quiescent(drm_device_t *dev) -{ - DECLARE_WAITQUEUE(entry, current); - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - unsigned long end; - - if(dev_priv == NULL) { - return; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i830_dma_quiescent_emit(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if(time_before(end, jiffies)) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); - - return; + i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ ); } static int i830_flush_queue(drm_device_t *dev) { - DECLARE_WAITQUEUE(entry, current); - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; + drm_i830_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; - unsigned long end; - int i, ret = 0; + int i, ret = 0; + RING_LOCALS; + + i830_kernel_lost_context(dev); - if(dev_priv == NULL) { - return 0; - } - atomic_set(&dev_priv->flush_done, 0); - add_wait_queue(&dev_priv->flush_queue, &entry); - end = jiffies + (HZ*3); - for (;;) { - current->state = TASK_INTERRUPTIBLE; - i830_dma_emit_flush(dev); - if (atomic_read(&dev_priv->flush_done) == 1) break; - if(time_before(end, jiffies)) { - DRM_ERROR("lockup\n"); - break; - } - schedule_timeout(HZ*3); - if (signal_pending(current)) { - ret = -EINTR; /* Can't restart */ - break; - } - } - - current->state = TASK_RUNNING; - remove_wait_queue(&dev_priv->flush_queue, &entry); + BEGIN_LP_RING(2); + OUT_RING( CMD_REPORT_HEAD ); + OUT_RING( 0 ); + ADVANCE_LP_RING(); + i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ ); for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; @@ -1146,7 +1290,7 @@ if (used == I830_BUF_HARDWARE) DRM_DEBUG("reclaimed from HARDWARE\n"); if (used == I830_BUF_CLIENT) - DRM_DEBUG("still on client HARDWARE\n"); + DRM_DEBUG("still on client\n"); } return ret; @@ -1185,8 +1329,7 @@ { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - - DRM_DEBUG("i830_flush_ioctl\n"); + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i830_flush_ioctl called without lock held\n"); return -EINVAL; @@ -1275,6 +1418,53 @@ return 0; } + + +/* Not sure why this isn't set all the time: + */ +static void i830_do_init_pageflip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; +} + +int i830_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + if (dev_priv->current_page != 0) + i830_dma_dispatch_flip( dev ); + + dev_priv->page_flipping = 0; + return 0; +} + +int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + 
DRM_ERROR("i830_flip_buf called without lock held\n"); + return -EINVAL; + } + + if (!dev_priv->page_flipping) + i830_do_init_pageflip( dev ); + + i830_dma_dispatch_flip( dev ); + return 0; +} + int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { @@ -1324,46 +1514,80 @@ return retcode; } -int i830_copybuf(struct inode *inode, struct file *filp, unsigned int cmd, +int i830_copybuf(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + /* Never copy - 2.4.x doesn't need it */ + return 0; +} + +int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { + return 0; +} + + + +int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; - drm_i830_copy_t d; - drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; - u32 *hw_status = (u32 *)dev_priv->hw_status_page; - drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *) - dev_priv->sarea_priv; - drm_buf_t *buf; - drm_i830_buf_priv_t *buf_priv; - drm_device_dma_t *dma = dev->dma; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_getparam_t param; + int value; - if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { - DRM_ERROR("i830_dma called without lock held\n"); + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return -EINVAL; } - - if (copy_from_user(&d, (drm_i830_copy_t *)arg, sizeof(d))) - return -EFAULT; - - if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL; - buf = dma->buflist[ d.idx ]; - buf_priv = buf->dev_private; - if (buf_priv->currently_mapped != I830_BUF_MAPPED) return -EPERM; - - if(d.used < 0 || d.used > buf->total) return -EINVAL; - if (copy_from_user(buf_priv->virtual, d.address, d.used)) + if (copy_from_user(¶m, (drm_i830_getparam_t *)arg, sizeof(param) )) return -EFAULT; - sarea_priv->last_dispatch = (int) hw_status[5]; + switch( param.param ) { + case I830_PARAM_IRQ_ACTIVE: + value = dev->irq ? 1 : 0; + break; + default: + return -EINVAL; + } + if ( copy_to_user( param.value, &value, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + return 0; } -int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) + +int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) { - if(VM_DONTCOPY == 0) return 1; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_setparam_t param; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user(¶m, (drm_i830_setparam_t *)arg, sizeof(param) )) + return -EFAULT; + + switch( param.param ) { + case I830_SETPARAM_USE_MI_BATCHBUFFER_START: + dev_priv->use_mi_batchbuffer_start = param.value; + break; + default: + return -EINVAL; + } + return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i830_drm.h linux.21pre4-ac2/drivers/char/drm/i830_drm.h --- linux.21pre4/drivers/char/drm/i830_drm.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i830_drm.h 2003-01-31 11:54:14.000000000 +0000 @@ -3,6 +3,9 @@ /* WARNING: These defines must be the same as what the Xserver uses. * if you change them, you must change the defines in the Xserver. 
+ * + * KW: Actually, you can't ever change them because doing so would + * break backwards compatibility. */ #ifndef _I830_DEFINES_ @@ -18,14 +21,12 @@ #define I830_NR_TEX_REGIONS 64 #define I830_LOG_MIN_TEX_REGION_SIZE 16 -/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */ -#if !defined(I830_ENABLE_4_TEXTURES) +/* KW: These aren't correct but someone set them to two and then + * released the module. Now we can't change them as doing so would + * break backwards compatibility. + */ #define I830_TEXTURE_COUNT 2 -#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */ -#else /* defined(I830_ENABLE_4_TEXTURES) */ -#define I830_TEXTURE_COUNT 4 -#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */ -#endif /* I830_ENABLE_4_TEXTURES */ +#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT #define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */ @@ -57,6 +58,7 @@ #define I830_UPLOAD_TEXBLEND_MASK 0xf00000 #define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n)) #define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000 +#define I830_UPLOAD_STIPPLE 0x8000000 /* Indices into buf.Setup where various bits of state are mirrored per * context and per buffer. These can be fired at the card as a unit, @@ -73,7 +75,6 @@ */ #define I830_DESTREG_CBUFADDR 0 -/* Invarient */ #define I830_DESTREG_DBUFADDR 1 #define I830_DESTREG_DV0 2 #define I830_DESTREG_DV1 3 @@ -109,6 +110,13 @@ #define I830_CTXREG_MCSB1 16 #define I830_CTX_SETUP_SIZE 17 +/* 1.3: Stipple state + */ +#define I830_STPREG_ST0 0 +#define I830_STPREG_ST1 1 +#define I830_STP_SETUP_SIZE 2 + + /* Texture state (per tex unit) */ @@ -124,6 +132,18 @@ #define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */ #define I830_TEX_SETUP_SIZE 10 +#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */ +#define I830_TEXREG_TM0S0 1 +#define I830_TEXREG_TM0S1 2 +#define I830_TEXREG_TM0S2 3 +#define I830_TEXREG_TM0S3 4 +#define I830_TEXREG_TM0S4 5 +#define I830_TEXREG_NOP0 6 /* noop */ +#define I830_TEXREG_NOP1 7 /* noop */ +#define I830_TEXREG_NOP2 8 /* noop */ +#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */ +#define __I830_TEX_SETUP_SIZE 10 + #define I830_FRONT 0x1 #define I830_BACK 0x2 #define I830_DEPTH 0x4 @@ -199,8 +219,53 @@ int ctxOwner; /* last context to upload state */ int vertex_prim; + + int pf_enabled; /* is pageflipping allowed? */ + int pf_active; + int pf_current_page; /* which buffer is being displayed? */ + + int perf_boxes; /* performance boxes to be displayed */ + + /* Here's the state for texunits 2,3: + */ + unsigned int TexState2[I830_TEX_SETUP_SIZE]; + unsigned int TexBlendState2[I830_TEXBLEND_SIZE]; + unsigned int TexBlendStateWordsUsed2; + + unsigned int TexState3[I830_TEX_SETUP_SIZE]; + unsigned int TexBlendState3[I830_TEXBLEND_SIZE]; + unsigned int TexBlendStateWordsUsed3; + + unsigned int StippleState[I830_STP_SETUP_SIZE]; } drm_i830_sarea_t; +/* Flags for perf_boxes + */ +#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */ +#define I830_BOX_FLIP 0x2 /* populated by kernel */ +#define I830_BOX_WAIT 0x4 /* populated by kernel & client */ +#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */ +#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */ + + +/* I830 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. 
+ */ +#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t) +#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t) +#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t) +#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43) +#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44) +#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t) +#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46) +#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t) +#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48) +#define DRM_IOCTL_I830_FLIP DRM_IO ( 0x49) +#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(0x4a, drm_i830_irq_emit_t) +#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( 0x4b, drm_i830_irq_wait_t) +#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(0x4c, drm_i830_getparam_t) +#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(0x4d, drm_i830_setparam_t) + typedef struct _drm_i830_clear { int clear_color; int clear_depth; @@ -235,4 +300,36 @@ int granted; } drm_i830_dma_t; + +/* 1.3: Userspace can request & wait on irq's: + */ +typedef struct drm_i830_irq_emit { + int *irq_seq; +} drm_i830_irq_emit_t; + +typedef struct drm_i830_irq_wait { + int irq_seq; +} drm_i830_irq_wait_t; + + +/* 1.3: New ioctl to query kernel params: + */ +#define I830_PARAM_IRQ_ACTIVE 1 + +typedef struct drm_i830_getparam { + int param; + int *value; +} drm_i830_getparam_t; + + +/* 1.3: New ioctl to set kernel params: + */ +#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1 + +typedef struct drm_i830_setparam { + int param; + int value; +} drm_i830_setparam_t; + + #endif /* _I830_DRM_H_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i830_drv.c linux.21pre4-ac2/drivers/char/drm/i830_drv.c --- linux.21pre4/drivers/char/drm/i830_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i830_drv.c 2003-01-06 17:28:08.000000000 +0000 @@ -29,41 +29,16 @@ * Jeff Hartmann * Gareth Hughes * Abraham vd Merwe + * Keith Whitwell */ #include #include "i830.h" #include "drmP.h" +#include "drm.h" +#include "i830_drm.h" #include "i830_drv.h" -#define DRIVER_AUTHOR "VA Linux Systems Inc." - -#define DRIVER_NAME "i830" -#define DRIVER_DESC "Intel 830M" -#define DRIVER_DATE "20011004" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 }, - -#define __HAVE_COUNTERS 4 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#define __HAVE_COUNTER9 _DRM_STAT_DMA - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -72,25 +47,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. 
If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init i830_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", i830_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i830_drv.h linux.21pre4-ac2/drivers/char/drm/i830_drv.h --- linux.21pre4/drivers/char/drm/i830_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i830_drv.h 2003-01-31 11:54:14.000000000 +0000 @@ -61,24 +61,36 @@ drm_i830_sarea_t *sarea_priv; drm_i830_ring_buffer_t ring; - u8 *hw_status_page; + unsigned long hw_status_page; unsigned long counter; - - dma_addr_t dma_status_page; - atomic_t flush_done; - wait_queue_head_t flush_queue; /* Processes waiting until flush */ + dma_addr_t dma_status_page; + drm_buf_t *mmap_buffer; u32 front_di1, back_di1, zi1; int back_offset; int depth_offset; + int front_offset; int w, h; int pitch; int back_pitch; int depth_pitch; unsigned int cpp; + + int do_boxes; + int dma_used; + + int current_page; + int page_flipping; + + wait_queue_head_t irq_queue; + atomic_t irq_received; + atomic_t irq_emitted; + + int use_mi_batchbuffer_start; + } drm_i830_private_t; /* i830_dma.c */ @@ -109,24 +121,81 @@ extern int i830_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -#define I830_VERBOSE 0 +extern int i830_flip_bufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + +extern int i830_getparam( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +extern int i830_setparam( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); + +/* i830_irq.c */ +extern int i830_irq_emit( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int i830_irq_wait( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ); +extern int i830_wait_irq(drm_device_t *dev, int irq_nr); +extern int i830_emit_irq(drm_device_t *dev); + #define I830_BASE(reg) ((unsigned long) \ dev_priv->mmio_map->handle) #define I830_ADDR(reg) (I830_BASE(reg) + reg) -#define I830_DEREF(reg) *(__volatile__ int *)I830_ADDR(reg) -#define I830_READ(reg) I830_DEREF(reg) -#define I830_WRITE(reg,val) do { I830_DEREF(reg) = val; } while (0) +#define I830_DEREF(reg) *(__volatile__ unsigned int *)I830_ADDR(reg) +#define I830_READ(reg) readl((volatile u32 *)I830_ADDR(reg)) +#define I830_WRITE(reg,val) writel(val, (volatile u32 *)I830_ADDR(reg)) #define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg) #define I830_READ16(reg) I830_DEREF16(reg) #define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0) + + +#define I830_VERBOSE 0 + +#define RING_LOCALS unsigned int outring, ringmask, outcount; \ + volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I830_VERBOSE) \ + printk("BEGIN_LP_RING(%d) in %s\n", \ + n, __FUNCTION__); \ + if (dev_priv->ring.space < n*4) \ + i830_wait_ring(dev, n*4, __FUNCTION__); \ + outcount = 0; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ +} while (0) + + +#define OUT_RING(n) do { \ + if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outcount++; \ + outring += 4; \ + outring &= ringmask; \ +} 
while (0) + +#define ADVANCE_LP_RING() do { \ + if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \ + dev_priv->ring.tail = outring; \ + dev_priv->ring.space -= outcount * 4; \ + I830_WRITE(LP_RING + RING_TAIL, outring); \ +} while(0) + +extern int i830_wait_ring(drm_device_t *dev, int n, const char *caller); + + #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) #define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) +#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16)) +#define LOAD_TEXTURE_MAP0 (1<<11) + #define INST_PARSER_CLIENT 0x00000000 #define INST_OP_FLUSH 0x02000000 #define INST_FLUSH_MAP_CACHE 0x00000001 @@ -142,18 +211,21 @@ #define I830REG_INT_MASK_R 0x020a8 #define I830REG_INT_ENABLE_R 0x020a0 +#define I830_IRQ_RESERVED ((1<<13)|(3<<2)) + + #define LP_RING 0x2030 #define HP_RING 0x2040 #define RING_TAIL 0x00 -#define TAIL_ADDR 0x000FFFF8 +#define TAIL_ADDR 0x001FFFF8 #define RING_HEAD 0x04 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 #define HEAD_ADDR 0x001FFFFC #define RING_START 0x08 -#define START_ADDR 0x00FFFFF8 +#define START_ADDR 0x0xFFFFF000 #define RING_LEN 0x0C -#define RING_NR_PAGES 0x000FF000 +#define RING_NR_PAGES 0x001FF000 #define RING_REPORT_MASK 0x00000006 #define RING_REPORT_64K 0x00000002 #define RING_REPORT_128K 0x00000004 @@ -184,6 +256,12 @@ #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) + +#define CMD_3D (0x3<<29) +#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16)) +#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16)) #define BR00_BITBLT_CLIENT 0x40000000 #define BR00_OP_COLOR_BLT 0x10000000 @@ -208,8 +286,15 @@ #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) #define MI_BATCH_BUFFER ((0x30<<23)|1) +#define MI_BATCH_BUFFER_START (0x31<<23) +#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_NON_SECURE (1) +#define MI_WAIT_FOR_EVENT ((0x3<<23)) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) + +#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23)) #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i830.h linux.21pre4-ac2/drivers/char/drm/i830.h --- linux.21pre4/drivers/char/drm/i830.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/i830.h 2003-01-31 11:54:14.000000000 +0000 @@ -41,6 +41,48 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "VA Linux Systems Inc." + +#define DRIVER_NAME "i830" +#define DRIVER_DESC "Intel 830M" +#define DRIVER_DATE "20021108" + +/* Interface history: + * + * 1.1: Original. + * 1.2: ? + * 1.3: New irq emit/wait ioctls. + * New pageflip ioctl. + * New getparam ioctl. + * State for texunits 3&4 in sarea. + * New (alternative) layout for texture state. 
+ */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 3 +#define DRIVER_PATCHLEVEL 2 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_FLIP)] = { i830_flip_bufs, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_EMIT)] = { i830_irq_emit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_IRQ_WAIT)] = { i830_irq_wait, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_GETPARAM)] = { i830_getparam, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_I830_SETPARAM)] = { i830_setparam, 1, 0 } + +#define __HAVE_COUNTERS 4 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY +#define __HAVE_COUNTER9 _DRM_STAT_DMA + /* Driver customization: */ #define __HAVE_RELEASE 1 @@ -60,51 +102,50 @@ i830_dma_quiescent( dev ); \ } while (0) + +/* Driver will work either way: IRQ's save cpu time when waiting for + * the card, but are subject to subtle interactions between bios, + * hardware and the driver. + */ +#define USE_IRQS 0 + + +#if USE_IRQS #define __HAVE_DMA_IRQ 1 -#define __HAVE_DMA_IRQ_BH 1 -#define __HAVE_SHARED_IRQ 1 -#define DRIVER_PREINSTALL() do { \ - drm_i830_private_t *dev_priv = \ - (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I830_READ16( I830REG_HWSTAM ); \ - tmp = tmp & 0x6000; \ - I830_WRITE16( I830REG_HWSTAM, tmp ); \ - \ - tmp = I830_READ16( I830REG_INT_MASK_R ); \ - tmp = tmp & 0x6000; /* Unmask interrupts */ \ - I830_WRITE16( I830REG_INT_MASK_R, tmp ); \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ -} while (0) +#define __HAVE_SHARED_IRQ 1 -#define DRIVER_POSTINSTALL() do { \ - drm_i830_private_t *dev_priv = \ +#define DRIVER_PREINSTALL() do { \ + drm_i830_private_t *dev_priv = \ (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; \ - tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ + \ + I830_WRITE16( I830REG_HWSTAM, 0xffff ); \ + I830_WRITE16( I830REG_INT_MASK_R, 0x0 ); \ + I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 ); \ } while (0) -#define DRIVER_UNINSTALL() do { \ - drm_i830_private_t *dev_priv = \ - (drm_i830_private_t *)dev->dev_private; \ - u16 tmp; \ - if ( dev_priv ) { \ - tmp = I830_READ16( I830REG_INT_IDENTITY_R ); \ - tmp = tmp & ~(0x6000); /* Clear all interrupts */ \ - if ( tmp != 0 ) \ - I830_WRITE16( I830REG_INT_IDENTITY_R, tmp ); \ - \ - tmp = I830_READ16( I830REG_INT_ENABLE_R ); \ - tmp = tmp & 0x6000; /* Disable all interrupts */ \ - I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \ - } \ + +#define DRIVER_POSTINSTALL() do { \ + drm_i830_private_t *dev_priv = \ + (drm_i830_private_t *)dev->dev_private; \ + I830_WRITE16( I830REG_INT_ENABLE_R, 0x2 ); \ + atomic_set(&dev_priv->irq_received, 0); \ + atomic_set(&dev_priv->irq_emitted, 0); \ + init_waitqueue_head(&dev_priv->irq_queue); \ } while 
(0) + +/* This gets called too late to be useful: dev_priv has already been + * freed. + */ +#define DRIVER_UNINSTALL() do { \ +} while (0) + +#else +#define __HAVE_DMA_IRQ 0 +#endif + + + /* Buffer customization: */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/i830_irq.c linux.21pre4-ac2/drivers/char/drm/i830_irq.c --- linux.21pre4/drivers/char/drm/i830_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/drm/i830_irq.c 2003-01-31 11:54:14.000000000 +0000 @@ -0,0 +1,178 @@ +/* i830_dma.c -- DMA support for the I830 -*- linux-c -*- + * + * Copyright 2002 Tungsten Graphics, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Keith Whitwell + * + */ + + +#include "i830.h" +#include "drmP.h" +#include "drm.h" +#include "i830_drm.h" +#include "i830_drv.h" +#include /* For task queue support */ +#include + + +void DRM(dma_service)(int irq, void *device, struct pt_regs *regs) +{ + drm_device_t *dev = (drm_device_t *)device; + drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private; + u16 temp; + + temp = I830_READ16(I830REG_INT_IDENTITY_R); + printk("%s: %x\n", __FUNCTION__, temp); + + if(temp == 0) + return; + + I830_WRITE16(I830REG_INT_IDENTITY_R, temp); + + if (temp & 2) { + atomic_inc(&dev_priv->irq_received); + wake_up_interruptible(&dev_priv->irq_queue); + } +} + + +int i830_emit_irq(drm_device_t *dev) +{ + drm_i830_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s\n", __FUNCTION__); + + atomic_inc(&dev_priv->irq_emitted); + + BEGIN_LP_RING(2); + OUT_RING( 0 ); + OUT_RING( GFX_OP_USER_INTERRUPT ); + ADVANCE_LP_RING(); + + return atomic_read(&dev_priv->irq_emitted); +} + + +int i830_wait_irq(drm_device_t *dev, int irq_nr) +{ + drm_i830_private_t *dev_priv = + (drm_i830_private_t *)dev->dev_private; + DECLARE_WAITQUEUE(entry, current); + unsigned long end = jiffies + HZ*3; + int ret = 0; + + DRM_DEBUG("%s\n", __FUNCTION__); + + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + return 0; + + dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; + + add_wait_queue(&dev_priv->irq_queue, &entry); + + for (;;) { + current->state = TASK_INTERRUPTIBLE; + if (atomic_read(&dev_priv->irq_received) >= irq_nr) + break; + if (time_after(jiffies, end)) { + DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n", + I830_READ16( I830REG_INT_IDENTITY_R ), + I830_READ16( I830REG_INT_MASK_R ), + I830_READ16( I830REG_INT_ENABLE_R ), + I830_READ16( I830REG_HWSTAM )); + + ret = -EBUSY; /* Lockup? Missed irq? */ + break; + } + schedule_timeout(HZ*3); + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + + current->state = TASK_RUNNING; + remove_wait_queue(&dev_priv->irq_queue, &entry); + return ret; +} + + +/* Needs the lock as it touches the ring. + */ +int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_emit_t emit; + int result; + + if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("i830_irq_emit called without lock held\n"); + return -EINVAL; + } + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user( &emit, (drm_i830_irq_emit_t *)arg, sizeof(emit) )) + return -EFAULT; + + result = i830_emit_irq( dev ); + + if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + +/* Doesn't need the hardware lock. 
+ */ +int i830_irq_wait( struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_i830_private_t *dev_priv = dev->dev_private; + drm_i830_irq_wait_t irqwait; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + if (copy_from_user( &irqwait, (drm_i830_irq_wait_t *)arg, + sizeof(irqwait) )) + return -EFAULT; + + return i830_wait_irq( dev, irqwait.irq_seq ); +} + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/Makefile linux.21pre4-ac2/drivers/char/drm/Makefile --- linux.21pre4/drivers/char/drm/Makefile 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/Makefile 2003-01-31 11:54:14.000000000 +0000 @@ -10,9 +10,9 @@ r128-objs := r128_drv.o r128_cce.o r128_state.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o i810-objs := i810_drv.o i810_dma.o -i830-objs := i830_drv.o i830_dma.o +i830-objs := i830_drv.o i830_dma.o i830_irq.o -radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o ffb-objs := ffb_drv.o ffb_context.o sis-objs := sis_drv.o sis_ds.o sis_mm.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga_dma.c linux.21pre4-ac2/drivers/char/drm/mga_dma.c --- linux.21pre4/drivers/char/drm/mga_dma.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga_dma.c 2003-01-06 17:28:08.000000000 +0000 @@ -35,10 +35,11 @@ #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" - -#include /* For task queue support */ -#include +#include +#include "drm_os_linux.h" #define MGA_DEFAULT_USEC_TIMEOUT 10000 #define MGA_FREELIST_DEBUG 0 @@ -52,7 +53,7 @@ { u32 status = 0; int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK; @@ -74,7 +75,7 @@ { u32 status = 0; int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { status = MGA_READ( MGA_STATUS ) & MGA_DMA_IDLE_MASK; @@ -93,7 +94,7 @@ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_primary_buffer_t *primary = &dev_priv->prim; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The primary DMA stream should look like new right about now. */ @@ -114,7 +115,7 @@ int mga_do_engine_reset( drm_mga_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* Okay, so we've completely screwed up and locked the engine. * How about we clean up after ourselves? 
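The i830_irq.c additions above give the DRM a simple fence primitive: DRM_IOCTL_I830_IRQ_EMIT queues a GFX_OP_USER_INTERRUPT on the low-priority ring and returns a sequence number, while DRM_IOCTL_I830_IRQ_WAIT sleeps on irq_queue until irq_received reaches that sequence, giving up after roughly three seconds with -EBUSY (or -EINTR on a signal). A minimal client-side sketch of how the pair could be driven follows; it is not part of the patch, and it assumes i830_drm.h supplies the request macros and the drm_i830_irq_emit_t / drm_i830_irq_wait_t layouts, which are only inferred here from the copy_from_user()/copy_to_user() calls in the new file.

/*
 * Usage sketch only, under the assumptions stated above: the structure
 * members are inferred from i830_irq.c (emit.irq_seq is a pointer the
 * kernel writes the sequence into; irqwait.irq_seq is the sequence to
 * wait for), and the ioctl numbers come from i830_drm.h.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include "i830_drm.h"   /* DRM_IOCTL_I830_IRQ_EMIT / _WAIT and the structs */

/*
 * Emit a user interrupt behind the commands already queued on the ring and
 * block until the card has processed it.  'fd' is an open, authenticated
 * DRM file descriptor; the caller is assumed to hold the hardware lock for
 * the emit, since i830_irq_emit checks _DRM_LOCK_IS_HELD, while the wait
 * ioctl does not need the lock.
 */
static int i830_fence_sync(int fd)
{
        drm_i830_irq_emit_t emit;
        drm_i830_irq_wait_t irqwait;
        int seq = 0;

        emit.irq_seq = &seq;            /* kernel returns the sequence here */
        if (ioctl(fd, DRM_IOCTL_I830_IRQ_EMIT, &emit) < 0) {
                perror("DRM_IOCTL_I830_IRQ_EMIT");
                return -1;
        }

        irqwait.irq_seq = seq;          /* wait until that sequence is reached */
        if (ioctl(fd, DRM_IOCTL_I830_IRQ_WAIT, &irqwait) < 0) {
                perror("DRM_IOCTL_I830_IRQ_WAIT");  /* EBUSY: timeout, EINTR: signal */
                return -1;
        }
        return 0;
}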
@@ -160,8 +161,8 @@ u32 head, tail; u32 status = 0; int i; - DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DMA_LOCALS; + DRM_DEBUG( "\n" ); /* We need to wait so that we can do an safe flush */ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { @@ -207,7 +208,7 @@ mga_flush_write_combine(); MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ) @@ -215,7 +216,7 @@ drm_mga_primary_buffer_t *primary = &dev_priv->prim; u32 head, tail; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_DMA_WRAP(); @@ -250,7 +251,7 @@ MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); set_bit( 0, &primary->wrapped ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ) @@ -258,7 +259,7 @@ drm_mga_primary_buffer_t *primary = &dev_priv->prim; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 head = dev_priv->primary->offset; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); sarea_priv->last_wrap++; DRM_DEBUG( " wrap = %d\n", sarea_priv->last_wrap ); @@ -267,7 +268,7 @@ MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL ); clear_bit( 0, &primary->wrapped ); - DRM_DEBUG( "%s: done.\n", __FUNCTION__ ); + DRM_DEBUG( "done.\n" ); } @@ -307,8 +308,7 @@ drm_mga_buf_priv_t *buf_priv; drm_mga_freelist_t *entry; int i; - DRM_DEBUG( "%s: count=%d\n", - __FUNCTION__, dma->buf_count ); + DRM_DEBUG( "count=%d\n", dma->buf_count ); dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER ); @@ -354,7 +354,7 @@ drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; drm_mga_freelist_t *next; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); entry = dev_priv->head; while ( entry ) { @@ -392,7 +392,7 @@ drm_mga_freelist_t *prev; drm_mga_freelist_t *tail = dev_priv->tail; u32 head, wrap; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); head = MGA_READ( MGA_PRIMADDRESS ); wrap = dev_priv->sarea_priv->last_wrap; @@ -424,8 +424,7 @@ drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_freelist_t *head, *entry, *prev; - DRM_DEBUG( "%s: age=0x%06lx wrap=%d\n", - __FUNCTION__, + DRM_DEBUG( "age=0x%06lx wrap=%d\n", buf_priv->list_entry->age.head - dev_priv->primary->offset, buf_priv->list_entry->age.wrap ); @@ -458,9 +457,8 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) { drm_mga_private_t *dev_priv; - struct list_head *list; int ret; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER ); if ( !dev_priv ) @@ -494,15 +492,8 @@ dev_priv->texture_offset = init->texture_offset[0]; dev_priv->texture_size = init->texture_size[0]; - list_for_each( list, &dev->maplist->head ) { - drm_map_list_t *entry = (drm_map_list_t *)list; - if ( entry->map && - entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK) ) { - dev_priv->sarea = entry->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR( "failed to find sarea!\n" ); /* Assign dev_private so we can do cleanup. 
*/ @@ -626,8 +617,6 @@ dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE; - spin_lock_init( &dev_priv->prim.list_lock ); - dev_priv->prim.status[0] = dev_priv->primary->offset; dev_priv->prim.status[1] = 0; @@ -650,7 +639,7 @@ int mga_do_cleanup_dma( drm_device_t *dev ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); if ( dev->dev_private ) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -725,7 +714,7 @@ #if MGA_DMA_DEBUG int ret = mga_do_wait_for_idle( dev_priv ); if ( ret < 0 ) - DRM_INFO( __FUNCTION__": -EBUSY\n" ); + DRM_INFO( "%s: -EBUSY\n", __FUNCTION__ ); return ret; #else return mga_do_wait_for_idle( dev_priv ); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga_drm.h linux.21pre4-ac2/drivers/char/drm/mga_drm.h --- linux.21pre4/drivers/char/drm/mga_drm.h 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga_drm.h 2003-01-06 17:28:08.000000000 +0000 @@ -225,6 +225,20 @@ /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmMga.h) */ + +/* MGA specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t) +#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t) +#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42) +#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43) +#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t) +#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t) +#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t) +#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t) +#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t) + typedef struct _drm_mga_warp_index { int installed; unsigned long phys_addr; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga_drv.c linux.21pre4-ac2/drivers/char/drm/mga_drv.c --- linux.21pre4/drivers/char/drm/mga_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga_drv.c 2003-01-06 17:28:08.000000000 +0000 @@ -32,37 +32,9 @@ #include #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" - -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
- -#define DRIVER_NAME "mga" -#define DRIVER_DESC "Matrox G200/G400" -#define DRIVER_DATE "20010321" - -#define DRIVER_MAJOR 3 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 2 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, - - -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -70,27 +42,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init mga_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", mga_options ); -#endif - - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga_drv.h linux.21pre4-ac2/drivers/char/drm/mga_drv.h --- linux.21pre4/drivers/char/drm/mga_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga_drv.h 2003-01-06 17:28:08.000000000 +0000 @@ -46,8 +46,6 @@ u32 last_wrap; u32 high_mark; - - spinlock_t list_lock; } drm_mga_primary_buffer_t; typedef struct drm_mga_freelist { @@ -257,7 +255,7 @@ #define BEGIN_DMA_WRAP() \ do { \ if ( MGA_VERBOSE ) { \ - DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \ + DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \ DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ } \ prim = dev_priv->prim.start; \ @@ -276,7 +274,7 @@ #define FLUSH_DMA() \ do { \ if ( 0 ) { \ - DRM_INFO( "%s:\n" , __FUNCTION__); \ + DRM_INFO( "%s:\n", __FUNCTION__ ); \ DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ dev_priv->prim.tail, \ MGA_READ( MGA_PRIMADDRESS ) - \ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga.h linux.21pre4-ac2/drivers/char/drm/mga.h --- linux.21pre4/drivers/char/drm/mga.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga.h 2003-01-06 17:28:08.000000000 +0000 @@ -41,6 +41,33 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
+ +#define DRIVER_NAME "mga" +#define DRIVER_DESC "Matrox G200/G400" +#define DRIVER_DATE "20010321" + +#define DRIVER_MAJOR 3 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 2 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, + +#define __HAVE_COUNTERS 3 +#define __HAVE_COUNTER6 _DRM_STAT_IRQ +#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY +#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY + /* Driver customization: */ #define DRIVER_PRETAKEDOWN() do { \ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga_state.c linux.21pre4-ac2/drivers/char/drm/mga_state.c --- linux.21pre4/drivers/char/drm/mga_state.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga_state.c 2003-01-06 17:28:08.000000000 +0000 @@ -34,8 +34,9 @@ #include "mga.h" #include "drmP.h" -#include "mga_drv.h" #include "drm.h" +#include "mga_drm.h" +#include "mga_drv.h" /* ================================================================ @@ -512,7 +513,7 @@ int nbox = sarea_priv->nbox; int i; DMA_LOCALS; - DRM_DEBUG("%s:\n" , __FUNCTION__); + DRM_DEBUG( "\n" ); BEGIN_DMA( 1 ); @@ -606,7 +607,7 @@ int nbox = sarea_priv->nbox; int i; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); sarea_priv->last_frame.head = dev_priv->prim.tail; sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; @@ -760,8 +761,7 @@ u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; u32 y2; DMA_LOCALS; - DRM_DEBUG( "%s: buf=%d used=%d\n", - __FUNCTION__, buf->idx, buf->used ); + DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); y2 = length / 64; @@ -815,7 +815,7 @@ int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; - DRM_DEBUG( "%s:\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_DMA( 4 + nbox ); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/mga_warp.c linux.21pre4-ac2/drivers/char/drm/mga_warp.c --- linux.21pre4/drivers/char/drm/mga_warp.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/mga_warp.c 2003-01-06 17:28:08.000000000 +0000 @@ -27,8 +27,11 @@ * Gareth Hughes */ + #include "mga.h" #include "drmP.h" +#include "drm.h" +#include "mga_drm.h" #include "mga_drv.h" #include "mga_ucode.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/r128_cce.c linux.21pre4-ac2/drivers/char/drm/r128_cce.c --- linux.21pre4/drivers/char/drm/r128_cce.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/r128_cce.c 2003-01-06 17:28:08.000000000 +0000 @@ -30,14 +30,14 @@ #include "r128.h" #include "drmP.h" +#include "drm.h" +#include "r128_drm.h" #include "r128_drv.h" - -#include /* For task queue support */ -#include +#include "drm_os_linux.h" +#include #define R128_FIFO_DEBUG 0 - /* CCE microcode (from ATI) */ static u32 r128_cce_microcode[] = { 0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 
1076765731, 0, @@ -83,6 +83,7 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; +int r128_do_wait_for_idle( drm_r128_private_t *dev_priv ); int R128_READ_PLL(drm_device_t *dev, int addr) { @@ -131,7 +132,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -147,7 +148,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -157,7 +158,7 @@ int i, ret; ret = r128_do_wait_for_fifo( dev_priv, 64 ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { if ( !(R128_READ( R128_GUI_STAT ) & R128_GUI_ACTIVE) ) { @@ -168,7 +169,7 @@ } #if R128_FIFO_DEBUG - DRM_ERROR( "%s failed!\n", __FUNCTION__ ); + DRM_ERROR( "failed!\n" ); #endif return -EBUSY; } @@ -183,7 +184,7 @@ { int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); r128_do_wait_for_idle( dev_priv ); @@ -319,7 +320,7 @@ u32 ring_start; u32 tmp; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The manual (p. 2) says this address is in "VM space". This * means it's an offset from the start of AGP space. @@ -351,8 +352,8 @@ R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08llx handle=0x%08lx\n", - (u64)entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } @@ -374,9 +375,8 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) { drm_r128_private_t *dev_priv; - struct list_head *list; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); if ( dev_priv == NULL ) @@ -481,15 +481,8 @@ dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) | (dev_priv->span_offset >> 5)); - list_for_each(list, &dev->maplist->head) { - drm_map_list_t *r_list = (drm_map_list_t *)list; - if( r_list->map && - r_list->map->type == _DRM_SHM && - r_list->map->flags & _DRM_CONTAINS_LOCK ) { - dev_priv->sarea = r_list->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; @@ -622,16 +615,20 @@ if ( dev->dev_private ) { drm_r128_private_t *dev_priv = dev->dev_private; +#if __REALLY_HAVE_SG if ( !dev_priv->is_pci ) { +#endif DRM_IOREMAPFREE( dev_priv->cce_ring ); DRM_IOREMAPFREE( dev_priv->ring_rptr ); DRM_IOREMAPFREE( dev_priv->buffers ); +#if __REALLY_HAVE_SG } else { if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, dev_priv->bus_pci_gart )) DRM_ERROR( "failed to cleanup PCI GART!\n" ); } +#endif DRM(free)( dev->dev_private, sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); @@ -713,7 +710,7 @@ */ if ( stop.idle ) { ret = r128_do_cce_idle( dev_priv ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; } /* Finally, we can turn off the CCE. 
If the engine isn't idle, @@ -790,7 +787,7 @@ static int r128_do_init_pageflip( drm_device_t *dev ) { drm_r128_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv->crtc_offset = R128_READ( R128_CRTC_OFFSET ); dev_priv->crtc_offset_cntl = R128_READ( R128_CRTC_OFFSET_CNTL ); @@ -808,7 +805,7 @@ int r128_do_cleanup_pageflip( drm_device_t *dev ) { drm_r128_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset ); R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/r128_drm.h linux.21pre4-ac2/drivers/char/drm/r128_drm.h --- linux.21pre4/drivers/char/drm/r128_drm.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/r128_drm.h 2003-01-06 17:28:08.000000000 +0000 @@ -170,6 +170,27 @@ /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmR128.h) */ + +/* Rage 128 specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t) +#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41) +#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t) +#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43) +#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_R128_RESET DRM_IO( 0x46) +#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t) +#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t) +#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t) +#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t) +#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t) +#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t) +#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t) +#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t) +#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( 0x51, drm_r128_clear2_t) + typedef struct drm_r128_init { enum { R128_INIT_CCE = 0x01, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/r128_drv.c linux.21pre4-ac2/drivers/char/drm/r128_drv.c --- linux.21pre4/drivers/char/drm/r128_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/r128_drv.c 2003-01-06 17:28:08.000000000 +0000 @@ -32,48 +32,11 @@ #include #include "r128.h" #include "drmP.h" +#include "drm.h" +#include "r128_drm.h" #include "r128_drv.h" #include "ati_pcigart.h" -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
- -#define DRIVER_NAME "r128" -#define DRIVER_DESC "ATI Rage 128" -#define DRIVER_DATE "20010917" - -#define DRIVER_MAJOR 2 -#define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, - - -#if 0 -/* GH: Count data sent to card via ring or vertex/indirect buffers. - */ -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#endif - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -81,26 +44,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init r128_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", r128_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/r128_drv.h linux.21pre4-ac2/drivers/char/drm/r128_drv.h --- linux.21pre4/drivers/char/drm/r128_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/r128_drv.h 2003-01-31 11:54:14.000000000 +0000 @@ -33,9 +33,10 @@ #ifndef __R128_DRV_H__ #define __R128_DRV_H__ +#include -#define GET_RING_HEAD( ring ) le32_to_cpu( *(ring)->head ) -#define SET_RING_HEAD( ring, val ) *(ring)->head = cpu_to_le32( val ) +#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head ) +#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head ) typedef struct drm_r128_freelist { unsigned int age; @@ -384,44 +385,11 @@ #define R128_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define R128_ADDR(reg) (R128_BASE( reg ) + reg) -#define R128_DEREF(reg) *(volatile u32 *)R128_ADDR( reg ) -#ifdef __alpha__ -#define R128_READ(reg) (_R128_READ((u32 *)R128_ADDR(reg))) -static inline u32 _R128_READ(u32 *addr) -{ - mb(); - return *(volatile u32 *)addr; -} -#define R128_WRITE(reg,val) \ -do { \ - wmb(); \ - R128_DEREF(reg) = val; \ -} while (0) -#else -#define R128_READ(reg) le32_to_cpu( R128_DEREF( reg ) ) -#define R128_WRITE(reg,val) \ -do { \ - R128_DEREF( reg ) = cpu_to_le32( val ); \ -} while (0) -#endif +#define R128_READ(reg) readl( (volatile u32 *) R128_ADDR(reg) ) +#define R128_WRITE(reg,val) writel( (val) , (volatile u32 *) R128_ADDR(reg)) -#define R128_DEREF8(reg) *(volatile u8 *)R128_ADDR( reg ) -#ifdef __alpha__ -#define R128_READ8(reg) _R128_READ8((u8 *)R128_ADDR(reg)) -static inline u8 _R128_READ8(u8 *addr) -{ - mb(); - return *(volatile u8 *)addr; -} -#define R128_WRITE8(reg,val) \ -do { \ - wmb(); \ - R128_DEREF8(reg) = val; \ -} while (0) -#else -#define R128_READ8(reg) R128_DEREF8( reg ) -#define R128_WRITE8(reg,val) do { R128_DEREF8( reg ) = val; } while (0) -#endif +#define R128_READ8(reg) readb( (volatile u8 *) R128_ADDR(reg) ) +#define R128_WRITE8(reg,val) writeb( (val), (volatile u8 *) R128_ADDR(reg) ) #define R128_WRITE_PLL(addr,val) \ do { \ @@ -470,6 +438,7 @@ return -EBUSY; \ } \ __ring_space_done: ; \ + break; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ @@ -493,7 +462,11 @@ * Ring control */ +#if defined(__powerpc__) +#define r128_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) +#else #define r128_flush_write_combine() mb() +#endif #define R128_VERBOSE 0 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/r128.h linux.21pre4-ac2/drivers/char/drm/r128.h --- linux.21pre4/drivers/char/drm/r128.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/r128.h 2003-01-06 17:28:08.000000000 +0000 @@ -43,6 +43,35 @@ #define __HAVE_SG 1 #define __HAVE_PCI_DMA 1 +#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
+ +#define DRIVER_NAME "r128" +#define DRIVER_DESC "ATI Rage 128" +#define DRIVER_DATE "20010917" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 + + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_FULLSCREEN)] = { r128_fullscreen, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, + /* Driver customization: */ #define DRIVER_PRERELEASE() do { \ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/r128_state.c linux.21pre4-ac2/drivers/char/drm/r128_state.c --- linux.21pre4/drivers/char/drm/r128_state.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/r128_state.c 2003-01-06 17:28:08.000000000 +0000 @@ -29,9 +29,9 @@ #include "r128.h" #include "drmP.h" -#include "r128_drv.h" #include "drm.h" -#include +#include "r128_drm.h" +#include "r128_drv.h" /* ================================================================ @@ -528,7 +528,7 @@ { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; - DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + DRM_DEBUG( "page=%d\n", dev_priv->current_page ); #if R128_PERFORMANCE_BOXES /* Do some trivial performance monitoring... @@ -577,8 +577,7 @@ int prim = buf_priv->prim; int i = 0; RING_LOCALS; - DRM_DEBUG( "%s: buf=%d nbox=%d\n", - __FUNCTION__, buf->idx, sarea_priv->nbox ); + DRM_DEBUG( "buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox ); if ( 0 ) r128_print_dirty( "dispatch_vertex", sarea_priv->dirty ); @@ -789,7 +788,7 @@ u32 *data; int dword_shift, dwords; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. 
Thus, we'll diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_cp.c linux.21pre4-ac2/drivers/char/drm/radeon_cp.c --- linux.21pre4/drivers/char/drm/radeon_cp.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/radeon_cp.c 2003-01-30 16:48:42.000000000 +0000 @@ -30,21 +30,278 @@ #include "radeon.h" #include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" #include "radeon_drv.h" +#include "drm_os_linux.h" #include /* For task queue support */ #include - #define RADEON_FIFO_DEBUG 0 -#if defined(__alpha__) -# define PCIGART_ENABLED -#else -# undef PCIGART_ENABLED -#endif /* CP microcode (from ATI) */ +static u32 R200_cp_microcode[][2] = { + { 0x21007000, 0000000000 }, + { 0x20007000, 0000000000 }, + { 0x000000ab, 0x00000004 }, + { 0x000000af, 0x00000004 }, + { 0x66544a49, 0000000000 }, + { 0x49494174, 0000000000 }, + { 0x54517d83, 0000000000 }, + { 0x498d8b64, 0000000000 }, + { 0x49494949, 0000000000 }, + { 0x49da493c, 0000000000 }, + { 0x49989898, 0000000000 }, + { 0xd34949d5, 0000000000 }, + { 0x9dc90e11, 0000000000 }, + { 0xce9b9b9b, 0000000000 }, + { 0x000f0000, 0x00000016 }, + { 0x352e232c, 0000000000 }, + { 0x00000013, 0x00000004 }, + { 0x000f0000, 0x00000016 }, + { 0x352e272c, 0000000000 }, + { 0x000f0001, 0x00000016 }, + { 0x3239362f, 0000000000 }, + { 0x000077ef, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00061000, 0x00000002 }, + { 0x00000020, 0x0000001a }, + { 0x00004000, 0x0000001e }, + { 0x00000016, 0x00000004 }, + { 0x0003802a, 0x00000002 }, + { 0x040067e0, 0x00000002 }, + { 0x00000016, 0x00000004 }, + { 0x000077e0, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x000037e1, 0x00000002 }, + { 0x040067e1, 0x00000006 }, + { 0x000077e0, 0x00000002 }, + { 0x000077e1, 0x00000002 }, + { 0x000077e1, 0x00000006 }, + { 0xffffffff, 0000000000 }, + { 0x10000000, 0000000000 }, + { 0x0003802a, 0x00000002 }, + { 0x040067e0, 0x00000006 }, + { 0x00007675, 0x00000002 }, + { 0x00007676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0003802b, 0x00000002 }, + { 0x04002676, 0x00000002 }, + { 0x00007677, 0x00000002 }, + { 0x00007678, 0x00000006 }, + { 0x0000002e, 0x00000018 }, + { 0x0000002e, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x0000002f, 0x00000018 }, + { 0x0000002f, 0x00000018 }, + { 0000000000, 0x00000006 }, + { 0x01605000, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00098000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x64c0603d, 0x00000004 }, + { 0x00080000, 0x00000016 }, + { 0000000000, 0000000000 }, + { 0x0400251d, 0x00000002 }, + { 0x00007580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x04002580, 0x00000002 }, + { 0x00067581, 0x00000002 }, + { 0x00000046, 0x00000004 }, + { 0x00005000, 0000000000 }, + { 0x00061000, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x00019000, 0x00000002 }, + { 0x00011055, 0x00000014 }, + { 0x00000055, 0x00000012 }, + { 0x0400250f, 0x00000002 }, + { 0x0000504a, 0x00000004 }, + { 0x00007565, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000051, 0x00000004 }, + { 0x01e655b4, 0x00000002 }, + { 0x4401b0dc, 0x00000002 }, + { 0x01c110dc, 0x00000002 }, + { 0x2666705d, 0x00000018 }, + { 0x040c2565, 0x00000002 }, + { 0x0000005d, 0x00000018 }, + { 0x04002564, 0x00000002 }, + { 0x00007566, 0x00000002 }, + { 0x00000054, 0x00000004 }, + { 0x00401060, 0x00000008 }, + { 
0x00101000, 0x00000002 }, + { 0x000d80ff, 0x00000002 }, + { 0x00800063, 0x00000008 }, + { 0x000f9000, 0x00000002 }, + { 0x000e00ff, 0x00000002 }, + { 0000000000, 0x00000006 }, + { 0x00000080, 0x00000018 }, + { 0x00000054, 0x00000004 }, + { 0x00007576, 0x00000002 }, + { 0x00065000, 0x00000002 }, + { 0x00009000, 0x00000002 }, + { 0x00041000, 0x00000002 }, + { 0x0c00350e, 0x00000002 }, + { 0x00049000, 0x00000002 }, + { 0x00051000, 0x00000002 }, + { 0x01e785f8, 0x00000002 }, + { 0x00200000, 0x00000002 }, + { 0x00600073, 0x0000000c }, + { 0x00007563, 0x00000002 }, + { 0x006075f0, 0x00000021 }, + { 0x20007068, 0x00000004 }, + { 0x00005068, 0x00000004 }, + { 0x00007576, 0x00000002 }, + { 0x00007577, 0x00000002 }, + { 0x0000750e, 0x00000002 }, + { 0x0000750f, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00600076, 0x0000000c }, + { 0x006075f0, 0x00000021 }, + { 0x000075f8, 0x00000002 }, + { 0x00000076, 0x00000004 }, + { 0x000a750e, 0x00000002 }, + { 0x0020750f, 0x00000002 }, + { 0x00600079, 0x00000004 }, + { 0x00007570, 0x00000002 }, + { 0x00007571, 0x00000002 }, + { 0x00007572, 0x00000006 }, + { 0x00005000, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00007568, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x00000084, 0x0000000c }, + { 0x00058000, 0x00000002 }, + { 0x0c607562, 0x00000002 }, + { 0x00000086, 0x00000004 }, + { 0x00600085, 0x00000004 }, + { 0x400070dd, 0000000000 }, + { 0x000380dd, 0x00000002 }, + { 0x00000093, 0x0000001c }, + { 0x00065095, 0x00000018 }, + { 0x040025bb, 0x00000002 }, + { 0x00061096, 0x00000018 }, + { 0x040075bc, 0000000000 }, + { 0x000075bb, 0x00000002 }, + { 0x000075bc, 0000000000 }, + { 0x00090000, 0x00000006 }, + { 0x00090000, 0x00000002 }, + { 0x000d8002, 0x00000006 }, + { 0x00005000, 0x00000002 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x00007821, 0x00000002 }, + { 0x00007800, 0000000000 }, + { 0x01665000, 0x00000002 }, + { 0x000a0000, 0x00000002 }, + { 0x000671cc, 0x00000002 }, + { 0x0286f1cd, 0x00000002 }, + { 0x000000a3, 0x00000010 }, + { 0x21007000, 0000000000 }, + { 0x000000aa, 0x0000001c }, + { 0x00065000, 0x00000002 }, + { 0x000a0000, 0x00000002 }, + { 0x00061000, 0x00000002 }, + { 0x000b0000, 0x00000002 }, + { 0x38067000, 0x00000002 }, + { 0x000a00a6, 0x00000004 }, + { 0x20007000, 0000000000 }, + { 0x01200000, 0x00000002 }, + { 0x20077000, 0x00000002 }, + { 0x01200000, 0x00000002 }, + { 0x20007000, 0000000000 }, + { 0x00061000, 0x00000002 }, + { 0x0120751b, 0x00000002 }, + { 0x8040750a, 0x00000002 }, + { 0x8040750b, 0x00000002 }, + { 0x00110000, 0x00000002 }, + { 0x000380dd, 0x00000002 }, + { 0x000000bd, 0x0000001c }, + { 0x00061096, 0x00000018 }, + { 0x844075bd, 0x00000002 }, + { 0x00061095, 0x00000018 }, + { 0x840075bb, 0x00000002 }, + { 0x00061096, 0x00000018 }, + { 0x844075bc, 0x00000002 }, + { 0x000000c0, 0x00000004 }, + { 0x804075bd, 0x00000002 }, + { 0x800075bb, 0x00000002 }, + { 0x804075bc, 0x00000002 }, + { 0x00108000, 0x00000002 }, + { 0x01400000, 0x00000002 }, + { 0x006000c4, 0x0000000c }, + { 0x20c07000, 0x00000020 }, + { 0x000000c6, 0x00000012 }, + { 0x00800000, 0x00000006 }, + { 0x0080751d, 0x00000006 }, + { 0x000025bb, 0x00000002 }, + { 0x000040c0, 0x00000004 }, + { 0x0000775c, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460275d, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x00007999, 0x00000002 }, + { 0x00a05000, 0x00000002 }, + { 0x00661000, 0x00000002 }, + { 0x0460299b, 0x00000020 }, + { 0x00004000, 0000000000 }, + { 0x01e00830, 0x00000002 }, + { 0x21007000, 
0000000000 }, + { 0x00005000, 0x00000002 }, + { 0x00038042, 0x00000002 }, + { 0x040025e0, 0x00000002 }, + { 0x000075e1, 0000000000 }, + { 0x00000001, 0000000000 }, + { 0x000380d9, 0x00000002 }, + { 0x04007394, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, + { 0000000000, 0000000000 }, +}; + + static u32 radeon_cp_microcode[][2] = { { 0x21007000, 0000000000 }, { 0x20007000, 0000000000 }, @@ -346,6 +603,8 @@ u32 tmp; int i; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + tmp = RADEON_READ( RADEON_RB2D_DSTCACHE_CTLSTAT ); tmp |= RADEON_RB2D_DC_FLUSH_ALL; RADEON_WRITE( RADEON_RB2D_DSTCACHE_CTLSTAT, tmp ); @@ -370,6 +629,8 @@ { int i; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { int slots = ( RADEON_READ( RADEON_RBBM_STATUS ) & RADEON_RBBM_FIFOCNT_MASK ); @@ -388,8 +649,10 @@ { int i, ret; + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + ret = radeon_do_wait_for_fifo( dev_priv, 64 ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { if ( !(RADEON_READ( RADEON_RBBM_STATUS ) @@ -416,16 +679,31 @@ static void radeon_cp_load_microcode( drm_radeon_private_t *dev_priv ) { int i; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_wait_for_idle( dev_priv ); RADEON_WRITE( RADEON_CP_ME_RAM_ADDR, 0 ); - for ( i = 0 ; i < 256 ; i++ ) { - RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, - radeon_cp_microcode[i][1] ); - RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, - radeon_cp_microcode[i][0] ); + + if (dev_priv->is_r200) + { + DRM_INFO("Loading R200 Microcode\n"); + for ( i = 0 ; i < 256 ; i++ ) + { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + R200_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + R200_cp_microcode[i][0] ); + } + } + else + { + for ( i = 0 ; i < 256 ; i++ ) { + RADEON_WRITE( RADEON_CP_ME_RAM_DATAH, + radeon_cp_microcode[i][1] ); + RADEON_WRITE( RADEON_CP_ME_RAM_DATAL, + radeon_cp_microcode[i][0] ); + } } } @@ -435,7 +713,7 @@ */ static void radeon_do_cp_flush( drm_radeon_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); #if 0 u32 tmp; @@ -449,7 +727,7 @@ int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ) { RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_RING( 6 ); @@ -458,6 +736,7 @@ RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); + COMMIT_RING(); return radeon_do_wait_for_idle( dev_priv ); } @@ -467,7 +746,7 @@ static void radeon_do_cp_start( drm_radeon_private_t *dev_priv ) { RING_LOCALS; - DRM_DEBUG( 
"%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_wait_for_idle( dev_priv ); @@ -482,6 +761,7 @@ RADEON_WAIT_UNTIL_IDLE(); ADVANCE_RING(); + COMMIT_RING(); } /* Reset the Command Processor. This will not flush any pending @@ -491,7 +771,7 @@ static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv ) { u32 cur_read_ptr; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR ); RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr ); @@ -505,7 +785,7 @@ */ static void radeon_do_cp_stop( drm_radeon_private_t *dev_priv ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); RADEON_WRITE( RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS ); @@ -518,7 +798,7 @@ { drm_radeon_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); radeon_do_pixcache_flush( dev_priv ); @@ -622,13 +902,63 @@ RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); - DRM_DEBUG( "ring rptr: offset=0x%08llx handle=0x%08lx\n", - (u64)entry->busaddr[page_ofs], + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], entry->handle + tmp_ofs ); } + /* Initialize the scratch register pointer. This will cause + * the scratch register values to be written out to memory + * whenever they are updated. + * + * We simply put this behind the ring read pointer, this works + * with PCI GART as well as (whatever kind of) AGP GART + */ + RADEON_WRITE( RADEON_SCRATCH_ADDR, RADEON_READ( RADEON_CP_RB_RPTR_ADDR ) + + RADEON_SCRATCH_REG_OFFSET ); + + dev_priv->scratch = ((__volatile__ u32 *) + dev_priv->ring.head + + (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); + + RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); + + /* Writeback doesn't seem to work everywhere, test it first */ + writel(0, &dev_priv->scratch[1]); + RADEON_WRITE( RADEON_SCRATCH_REG1, 0xdeadbeef ); + + for ( tmp = 0 ; tmp < dev_priv->usec_timeout ; tmp++ ) { + if ( readl( &dev_priv->scratch[1] ) == 0xdeadbeef ) + break; + udelay(1); + } + + if ( tmp < dev_priv->usec_timeout ) { + dev_priv->writeback_works = 1; + DRM_DEBUG( "writeback test succeeded, tmp=%d\n", tmp ); + } else { + dev_priv->writeback_works = 0; + DRM_DEBUG( "writeback test failed\n" ); + } + + dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; + RADEON_WRITE( RADEON_LAST_FRAME_REG, + dev_priv->sarea_priv->last_frame ); + + dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; + RADEON_WRITE( RADEON_LAST_DISPATCH_REG, + dev_priv->sarea_priv->last_dispatch ); + + dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; + RADEON_WRITE( RADEON_LAST_CLEAR_REG, + dev_priv->sarea_priv->last_clear ); + /* Set ring buffer size */ +#ifdef __BIG_ENDIAN + RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT ); +#else RADEON_WRITE( RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw ); +#endif radeon_do_wait_for_idle( dev_priv ); @@ -647,9 +977,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) { drm_radeon_private_t *dev_priv; - struct list_head *list; u32 tmp; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); dev_priv = DRM(alloc)( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER ); if ( dev_priv == NULL ) @@ -659,17 +988,6 @@ dev_priv->is_pci = init->is_pci; -#if !defined(PCIGART_ENABLED) - /* PCI support is not 100% working, so we disable it here. 
- */ - if ( dev_priv->is_pci ) { - DRM_ERROR( "PCI GART not yet supported for Radeon!\n" ); - dev->dev_private = (void *)dev_priv; - radeon_do_cleanup_cp(dev); - return -EINVAL; - } -#endif - if ( dev_priv->is_pci && !dev->sg ) { DRM_ERROR( "PCI GART memory not allocated!\n" ); dev->dev_private = (void *)dev_priv; @@ -686,12 +1004,10 @@ return -EINVAL; } + dev_priv->is_r200 = (init->func == RADEON_INIT_R200_CP); + dev_priv->do_boxes = 0; dev_priv->cp_mode = init->cp_mode; - /* Simple idle check. - */ - atomic_set( &dev_priv->idle_count, 0 ); - /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring * read pointer. @@ -743,17 +1059,17 @@ * and screwing with the clear operation. */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | - RADEON_Z_ENABLE | (dev_priv->color_fmt << 10) | - RADEON_ZBLOCK16); + (1<<15)); - dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | - RADEON_Z_TEST_ALWAYS | - RADEON_STENCIL_TEST_ALWAYS | - RADEON_STENCIL_S_FAIL_KEEP | - RADEON_STENCIL_ZPASS_KEEP | - RADEON_STENCIL_ZFAIL_KEEP | - RADEON_Z_WRITE_ENABLE); + dev_priv->depth_clear.rb3d_zstencilcntl = + (dev_priv->depth_fmt | + RADEON_Z_TEST_ALWAYS | + RADEON_STENCIL_TEST_ALWAYS | + RADEON_STENCIL_S_FAIL_REPLACE | + RADEON_STENCIL_ZPASS_REPLACE | + RADEON_STENCIL_ZFAIL_REPLACE | + RADEON_Z_WRITE_ENABLE); dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | RADEON_BFACE_SOLID | @@ -767,15 +1083,8 @@ RADEON_ROUND_MODE_TRUNC | RADEON_ROUND_PREC_8TH_PIX); - list_for_each(list, &dev->maplist->head) { - drm_map_list_t *r_list = (drm_map_list_t *)list; - if( r_list->map && - r_list->map->type == _DRM_SHM && - r_list->map->flags & _DRM_CONTAINS_LOCK ) { - dev_priv->sarea = r_list->map; - break; - } - } + DRM_GETSAREA(); + if(!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); dev->dev_private = (void *)dev_priv; @@ -896,34 +1205,7 @@ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; -#if 0 - /* Initialize the scratch register pointer. This will cause - * the scratch register values to be written out to memory - * whenever they are updated. - * FIXME: This doesn't quite work yet, so we're disabling it - * for the release. - */ - RADEON_WRITE( RADEON_SCRATCH_ADDR, (dev_priv->ring_rptr->offset + - RADEON_SCRATCH_REG_OFFSET) ); - RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 ); -#endif - - dev_priv->scratch = ((__volatile__ u32 *) - dev_priv->ring_rptr->handle + - (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); - - dev_priv->sarea_priv->last_frame = 0; - RADEON_WRITE( RADEON_LAST_FRAME_REG, - dev_priv->sarea_priv->last_frame ); - - dev_priv->sarea_priv->last_dispatch = 0; - RADEON_WRITE( RADEON_LAST_DISPATCH_REG, - dev_priv->sarea_priv->last_dispatch ); - - dev_priv->sarea_priv->last_clear = 0; - RADEON_WRITE( RADEON_LAST_CLEAR_REG, - dev_priv->sarea_priv->last_clear ); - +#if __REALLY_HAVE_SG if ( dev_priv->is_pci ) { if (!DRM(ati_pcigart_init)( dev, &dev_priv->phys_pci_gart, &dev_priv->bus_pci_gart)) { @@ -953,19 +1235,20 @@ RADEON_WRITE( RADEON_MC_AGP_LOCATION, 0xffffffc0 ); /* ?? 
*/ RADEON_WRITE( RADEON_AGP_COMMAND, 0 ); /* clear AGP_COMMAND */ } else { +#endif /* __REALLY_HAVE_SG */ /* Turn off PCI GART */ tmp = RADEON_READ( RADEON_AIC_CNTL ) & ~RADEON_PCIGART_TRANSLATE_EN; RADEON_WRITE( RADEON_AIC_CNTL, tmp ); +#if __REALLY_HAVE_SG } +#endif /* __REALLY_HAVE_SG */ radeon_cp_load_microcode( dev_priv ); radeon_cp_init_ring_buffer( dev, dev_priv ); -#if ROTATE_BUFS dev_priv->last_buf = 0; -#endif dev->dev_private = (void *)dev_priv; @@ -976,7 +1259,7 @@ int radeon_do_cleanup_cp( drm_device_t *dev ) { - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); if ( dev->dev_private ) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -986,10 +1269,12 @@ DRM_IOREMAPFREE( dev_priv->ring_rptr ); DRM_IOREMAPFREE( dev_priv->buffers ); } else { +#if __REALLY_HAVE_SG if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, dev_priv->bus_pci_gart )) DRM_ERROR( "failed to cleanup PCI GART!\n" ); +#endif /* __REALLY_HAVE_SG */ } DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), @@ -1012,6 +1297,7 @@ switch ( init.func ) { case RADEON_INIT_CP: + case RADEON_INIT_R200_CP: return radeon_do_init_cp( dev, &init ); case RADEON_CLEANUP_CP: return radeon_do_cleanup_cp( dev ); @@ -1075,7 +1361,7 @@ */ if ( stop.idle ) { ret = radeon_do_cp_idle( dev_priv ); - if ( ret < 0 ) return ret; + if ( ret ) return ret; } /* Finally, we can turn off the CP. If the engine isn't idle, @@ -1145,117 +1431,74 @@ * Fullscreen mode */ -static int radeon_do_init_pageflip( drm_device_t *dev ) -{ - drm_radeon_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); - - dev_priv->crtc_offset = RADEON_READ( RADEON_CRTC_OFFSET ); - dev_priv->crtc_offset_cntl = RADEON_READ( RADEON_CRTC_OFFSET_CNTL ); - - RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->front_offset ); - RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, - dev_priv->crtc_offset_cntl | - RADEON_CRTC_OFFSET_FLIP_CNTL ); - - dev_priv->page_flipping = 1; - dev_priv->current_page = 0; - - return 0; -} - -int radeon_do_cleanup_pageflip( drm_device_t *dev ) +/* KW: Deprecated to say the least: + */ +int radeon_fullscreen(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) { - drm_radeon_private_t *dev_priv = dev->dev_private; - DRM_DEBUG( "%s\n", __FUNCTION__ ); - - RADEON_WRITE( RADEON_CRTC_OFFSET, dev_priv->crtc_offset ); - RADEON_WRITE( RADEON_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl ); - - dev_priv->page_flipping = 0; - dev_priv->current_page = 0; - return 0; } -int radeon_fullscreen( struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg ) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; - drm_radeon_fullscreen_t fs; - - LOCK_TEST_WITH_RETURN( dev ); - - if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg, - sizeof(fs) ) ) - return -EFAULT; - - switch ( fs.func ) { - case RADEON_INIT_FULLSCREEN: - return radeon_do_init_pageflip( dev ); - case RADEON_CLEANUP_FULLSCREEN: - return radeon_do_cleanup_pageflip( dev ); - } - - return -EINVAL; -} - /* ================================================================ * Freelist management */ -#define RADEON_BUFFER_USED 0xffffffff -#define RADEON_BUFFER_FREE 0 -#if 0 -static int radeon_freelist_init( drm_device_t *dev ) +/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through + * bufs until freelist code is used. 
Note this hides a problem with + * the scratch register * (used to keep track of last buffer + * completed) being written to before * the last buffer has actually + * completed rendering. + * + * KW: It's also a good way to find free buffers quickly. + * + * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't + * sleep. However, bugs in older versions of radeon_accel.c mean that + * we essentially have to do this, else old clients will break. + * + * However, it does leave open a potential deadlock where all the + * buffers are held by other clients, which can't release them because + * they can't get the lock. + */ + +drm_buf_t *radeon_freelist_get( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_buf_t *buf; drm_radeon_buf_priv_t *buf_priv; - drm_radeon_freelist_t *entry; - int i; - - dev_priv->head = DRM(alloc)( sizeof(drm_radeon_freelist_t), - DRM_MEM_DRIVER ); - if ( dev_priv->head == NULL ) - return -ENOMEM; - - memset( dev_priv->head, 0, sizeof(drm_radeon_freelist_t) ); - dev_priv->head->age = RADEON_BUFFER_USED; + drm_buf_t *buf; + int i, t; + int start; - for ( i = 0 ; i < dma->buf_count ; i++ ) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; + if ( ++dev_priv->last_buf >= dma->buf_count ) + dev_priv->last_buf = 0; - entry = DRM(alloc)( sizeof(drm_radeon_freelist_t), - DRM_MEM_DRIVER ); - if ( !entry ) return -ENOMEM; - - entry->age = RADEON_BUFFER_FREE; - entry->buf = buf; - entry->prev = dev_priv->head; - entry->next = dev_priv->head->next; - if ( !entry->next ) - dev_priv->tail = entry; - - buf_priv->discard = 0; - buf_priv->dispatched = 0; - buf_priv->list_entry = entry; + start = dev_priv->last_buf; - dev_priv->head->next = entry; + for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { + u32 done_age = GET_SCRATCH( 1 ); + DRM_DEBUG("done_age = %d\n",done_age); + for ( i = start ; i < dma->buf_count ; i++ ) { + buf = dma->buflist[i]; + buf_priv = buf->dev_private; + if ( buf->pid == 0 || (buf->pending && + buf_priv->age <= done_age) ) { + dev_priv->stats.requested_bufs++; + buf->pending = 0; + return buf; + } + start = 0; + } - if ( dev_priv->head->next ) - dev_priv->head->next->prev = entry; + if (t) { + udelay(1); + dev_priv->stats.freelist_loops++; + } } - return 0; - + DRM_ERROR( "returning NULL!\n" ); + return NULL; } -#endif - +#if 0 drm_buf_t *radeon_freelist_get( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; @@ -1263,76 +1506,40 @@ drm_radeon_buf_priv_t *buf_priv; drm_buf_t *buf; int i, t; -#if ROTATE_BUFS int start; -#endif + u32 done_age = readl(&dev_priv->scratch[1]); - /* FIXME: Optimize -- use freelist code */ - - for ( i = 0 ; i < dma->buf_count ; i++ ) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if ( buf->pid == 0 ) { - DRM_DEBUG( " ret buf=%d last=%d pid=0\n", - buf->idx, dev_priv->last_buf ); - return buf; - } - DRM_DEBUG( " skipping buf=%d pid=%d\n", - buf->idx, buf->pid ); - } - -#if ROTATE_BUFS if ( ++dev_priv->last_buf >= dma->buf_count ) dev_priv->last_buf = 0; + start = dev_priv->last_buf; -#endif - for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) { -#if 0 - /* FIXME: Disable this for now */ - u32 done_age = dev_priv->scratch[RADEON_LAST_DISPATCH]; -#else - u32 done_age = RADEON_READ( RADEON_LAST_DISPATCH_REG ); -#endif -#if ROTATE_BUFS + dev_priv->stats.freelist_loops++; + + for ( t = 0 ; t < 2 ; t++ ) { for ( i = start ; i < dma->buf_count ; i++ ) { -#else - for ( i = 0 ; i < dma->buf_count ; i++ ) { -#endif buf = dma->buflist[i]; buf_priv = 
buf->dev_private; - if ( buf->pending && buf_priv->age <= done_age ) { - /* The buffer has been processed, so it - * can now be used. - */ + if ( buf->pid == 0 || (buf->pending && + buf_priv->age <= done_age) ) { + dev_priv->stats.requested_bufs++; buf->pending = 0; - DRM_DEBUG( " ret buf=%d last=%d age=%d done=%d\n", buf->idx, dev_priv->last_buf, buf_priv->age, done_age ); return buf; } - DRM_DEBUG( " skipping buf=%d age=%d done=%d\n", - buf->idx, buf_priv->age, - done_age ); -#if ROTATE_BUFS - start = 0; -#endif } - udelay( 1 ); + start = 0; } - DRM_ERROR( "returning NULL!\n" ); return NULL; } +#endif void radeon_freelist_reset( drm_device_t *dev ) { drm_device_dma_t *dma = dev->dma; -#if ROTATE_BUFS drm_radeon_private_t *dev_priv = dev->dev_private; -#endif int i; -#if ROTATE_BUFS dev_priv->last_buf = 0; -#endif for ( i = 0 ; i < dma->buf_count ; i++ ) { drm_buf_t *buf = dma->buflist[i]; drm_radeon_buf_priv_t *buf_priv = buf->dev_private; @@ -1349,11 +1556,23 @@ { drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; + u32 last_head = GET_RING_HEAD(ring); for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { - radeon_update_ring_snapshot( ring ); + u32 head = GET_RING_HEAD(ring); + + ring->space = (head - ring->tail) * sizeof(u32); + if ( ring->space <= 0 ) + ring->space += ring->size; if ( ring->space > n ) return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + if (head != last_head) + i = 0; + last_head = head; + udelay( 1 ); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_drm.h linux.21pre4-ac2/drivers/char/drm/radeon_drm.h --- linux.21pre4/drivers/char/drm/radeon_drm.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/radeon_drm.h 2003-01-06 17:28:08.000000000 +0000 @@ -2,6 +2,7 @@ * * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,6 +27,7 @@ * Authors: * Kevin E. Martin * Gareth Hughes + * Keith Whitwell */ #ifndef __RADEON_DRM_H__ @@ -37,7 +39,8 @@ #ifndef __RADEON_SAREA_DEFINES__ #define __RADEON_SAREA_DEFINES__ -/* What needs to be changed for the current vertex buffer? +/* Old style state flags, required for sarea interface (1.1 and 1.2 + * clears) and 1.2 drm_vertex2 ioctl. */ #define RADEON_UPLOAD_CONTEXT 0x00000001 #define RADEON_UPLOAD_VERTFMT 0x00000002 @@ -56,11 +59,136 @@ #define RADEON_UPLOAD_TEX2IMAGES 0x00004000 #define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ #define RADEON_REQUIRE_QUIESCENCE 0x00010000 -#define RADEON_UPLOAD_ALL 0x0001ffff +#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ +#define RADEON_UPLOAD_ALL 0x003effff +#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff + + +/* New style per-packet identifiers for use in cmd_buffer ioctl with + * the RADEON_EMIT_PACKET command. 
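The radeon_wait_ring() change earlier in this hunk drops the radeon_update_ring_snapshot() helper and recomputes free space from the hardware head pointer on every iteration. A self-contained sketch of that wraparound arithmetic, with assumed field names and one worked value:

/* Sketch only: head/tail are dword indices, size is in bytes. */
struct ring_sketch {
        unsigned int head, tail;
        unsigned int size;
};

static int ring_space_bytes(const struct ring_sketch *r)
{
        int space = ((int)r->head - (int)r->tail) * (int)sizeof(unsigned int);
        if (space <= 0)                 /* writer has wrapped past the reader */
                space += (int)r->size;
        return space;
}
/* e.g. head=4, tail=60, size=256: (4-60)*4 = -224, +256 = 32 bytes free */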
Comments relate new packets to old + * state bits and the packet size: + */ +#define RADEON_EMIT_PP_MISC 0 /* context/7 */ +#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ +#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ +#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ +#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ +#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ +#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ +#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ +#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ +#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ +#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ +#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ +#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ +#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ +#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ +#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ +#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ +#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ +#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ +#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ +#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ +#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ +#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ +#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ +#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ +#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ +#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ +#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ +#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ +#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ +#define R200_EMIT_VAP_CTL 32 /* vap/1 */ +#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ +#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ +#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ +#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ +#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ +#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ +#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ +#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ +#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ +#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ +#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ +#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ +#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ +#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ +#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ +#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ +#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ +#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ +#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ +#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ +#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ +#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ +#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ +#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ +#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ +#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ +#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ +#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ +#define R200_EMIT_PP_CUBIC_FACES_0 61 +#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 +#define R200_EMIT_PP_CUBIC_FACES_1 63 +#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 +#define R200_EMIT_PP_CUBIC_FACES_2 65 +#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 +#define R200_EMIT_PP_CUBIC_FACES_3 67 +#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 +#define R200_EMIT_PP_CUBIC_FACES_4 69 +#define 
R200_EMIT_PP_CUBIC_OFFSETS_4 70 +#define R200_EMIT_PP_CUBIC_FACES_5 71 +#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 +#define RADEON_MAX_STATE_PACKETS 73 + + +/* Commands understood by cmd_buffer ioctl. More can be added but + * obviously these can't be removed or changed: + */ +#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ +#define RADEON_CMD_SCALARS 2 /* emit scalar data */ +#define RADEON_CMD_VECTORS 3 /* emit vector data */ +#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ +#define RADEON_CMD_PACKET3 5 /* emit hw packet */ +#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ +#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ +#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: + * doesn't make the cpu wait, just + * the graphics hardware */ + + +typedef union { + int i; + struct { + unsigned char cmd_type, pad0, pad1, pad2; + } header; + struct { + unsigned char cmd_type, packet_id, pad0, pad1; + } packet; + struct { + unsigned char cmd_type, offset, stride, count; + } scalars; + struct { + unsigned char cmd_type, offset, stride, count; + } vectors; + struct { + unsigned char cmd_type, buf_idx, pad0, pad1; + } dma; + struct { + unsigned char cmd_type, flags, pad0, pad1; + } wait; +} drm_radeon_cmd_header_t; + +#define RADEON_WAIT_2D 0x1 +#define RADEON_WAIT_3D 0x2 + #define RADEON_FRONT 0x1 #define RADEON_BACK 0x2 #define RADEON_DEPTH 0x4 +#define RADEON_STENCIL 0x8 /* Primitive types */ @@ -78,12 +206,9 @@ /* Byte offsets for indirect buffer data */ #define RADEON_INDEX_PRIM_OFFSET 20 -#define RADEON_HOSTDATA_BLIT_OFFSET 32 #define RADEON_SCRATCH_REG_OFFSET 32 -/* Keep these small for testing - */ #define RADEON_NR_SAREA_CLIPRECTS 12 /* There are 2 heaps (local/AGP). Each region within a heap is a @@ -95,7 +220,7 @@ #define RADEON_NR_TEX_REGIONS 64 #define RADEON_LOG_TEX_GRANULARITY 16 -#define RADEON_MAX_TEXTURE_LEVELS 11 +#define RADEON_MAX_TEXTURE_LEVELS 12 #define RADEON_MAX_TEXTURE_UNITS 3 #endif /* __RADEON_SAREA_DEFINES__ */ @@ -155,28 +280,18 @@ /* Setup state */ unsigned int se_cntl_status; /* 0x2140 */ -#ifdef TCL_ENABLE - /* TCL state */ - radeon_color_regs_t se_tcl_material_emmissive; /* 0x2210 */ - radeon_color_regs_t se_tcl_material_ambient; - radeon_color_regs_t se_tcl_material_diffuse; - radeon_color_regs_t se_tcl_material_specular; - unsigned int se_tcl_shininess; - unsigned int se_tcl_output_vtx_fmt; - unsigned int se_tcl_output_vtx_sel; - unsigned int se_tcl_matrix_select_0; - unsigned int se_tcl_matrix_select_1; - unsigned int se_tcl_ucp_vert_blend_ctl; - unsigned int se_tcl_texture_proc_ctl; - unsigned int se_tcl_light_model_ctl; - unsigned int se_tcl_per_light_ctl[4]; -#endif - /* Misc state */ unsigned int re_top_left; /* 0x26c0 */ unsigned int re_misc; } drm_radeon_context_regs_t; +typedef struct { + /* Zbias state */ + unsigned int se_zbias_factor; /* 0x1dac */ + unsigned int se_zbias_constant; +} drm_radeon_context2_regs_t; + + /* Setup registers for each texture unit */ typedef struct { @@ -186,24 +301,37 @@ unsigned int pp_txcblend; unsigned int pp_txablend; unsigned int pp_tfactor; - unsigned int pp_border_color; - -#ifdef CUBIC_ENABLE - unsigned int pp_cubic_faces; - unsigned int pp_cubic_offset[5]; -#endif } drm_radeon_texture_regs_t; typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim:8; + unsigned int stateidx:8; + unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ + unsigned int vc_format; /* vertex format */ +} drm_radeon_prim_t; + + 
+typedef struct { + drm_radeon_context_regs_t context; + drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; + drm_radeon_context2_regs_t context2; + unsigned int dirty; +} drm_radeon_state_t; + + +typedef struct { unsigned char next, prev; unsigned char in_use; int age; } drm_radeon_tex_region_t; typedef struct { - /* The channel for communication of state information to the kernel - * on firing a vertex buffer. + /* The channel for communication of state information to the + * kernel on firing a vertex buffer with either of the + * obsoleted vertex/index ioctls. */ drm_radeon_context_regs_t context_state; drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; @@ -225,16 +353,50 @@ drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1]; int tex_age[RADEON_NR_TEX_HEAPS]; int ctx_owner; + int pfState; /* number of 3d windows (0,1,2ormore) */ + int pfCurrentPage; /* which buffer is being displayed? */ + int crtc2_base; /* CRTC2 frame offset */ } drm_radeon_sarea_t; /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmRadeon.h) + * + * KW: actually it's illegal to change any of this (backwards compatibility). */ + +/* Radeon specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t) +#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41) +#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t) +#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43) +#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44) +#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45) +#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t) +#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47) +#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t) +#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t) +#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t) +#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t) +#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t) +#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t) +#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t) +#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t) +#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t) +#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52) +#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR( 0x53, drm_radeon_mem_alloc_t) +#define DRM_IOCTL_RADEON_FREE DRM_IOW( 0x54, drm_radeon_mem_free_t) +#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( 0x55, drm_radeon_mem_init_heap_t) +#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR( 0x56, drm_radeon_irq_emit_t) +#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( 0x57, drm_radeon_irq_wait_t) + typedef struct drm_radeon_init { enum { RADEON_INIT_CP = 0x01, - RADEON_CLEANUP_CP = 0x02 + RADEON_CLEANUP_CP = 0x02, + RADEON_INIT_R200_CP = 0x03, } func; unsigned long sarea_priv_offset; int is_pci; @@ -285,7 +447,7 @@ unsigned int clear_color; unsigned int clear_depth; unsigned int color_mask; - unsigned int depth_mask; + unsigned int depth_mask; /* misnamed field: should be stencil */ drm_radeon_clear_rect_t *depth_boxes; } drm_radeon_clear_t; @@ -304,6 +466,36 @@ int discard; /* Client finished with buffer? 
*/ } drm_radeon_indices_t; +/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices + * - allows multiple primitives and state changes in a single ioctl + * - supports driver change to emit native primitives + */ +typedef struct drm_radeon_vertex2 { + int idx; /* Index of vertex buffer */ + int discard; /* Client finished with buffer? */ + int nr_states; + drm_radeon_state_t *state; + int nr_prims; + drm_radeon_prim_t *prim; +} drm_radeon_vertex2_t; + +/* v1.3 - obsoletes drm_radeon_vertex2 + * - allows arbitarily large cliprect list + * - allows updating of tcl packet, vector and scalar state + * - allows memory-efficient description of state updates + * - allows state to be emitted without a primitive + * (for clears, ctx switches) + * - allows more than one dma buffer to be referenced per ioctl + * - supports tcl driver + * - may be extended in future versions with new cmd types, packets + */ +typedef struct drm_radeon_cmd_buffer { + int bufsz; + char *buf; + int nbox; + drm_clip_rect_t *boxes; +} drm_radeon_cmd_buffer_t; + typedef struct drm_radeon_tex_image { unsigned int x, y; /* Blit coordinates */ unsigned int width, height; @@ -330,4 +522,55 @@ int discard; } drm_radeon_indirect_t; + +/* 1.3: An ioctl to get parameters that aren't available to the 3d + * client any other way. + */ +#define RADEON_PARAM_AGP_BUFFER_OFFSET 1 /* card offset of 1st agp buffer */ +#define RADEON_PARAM_LAST_FRAME 2 +#define RADEON_PARAM_LAST_DISPATCH 3 +#define RADEON_PARAM_LAST_CLEAR 4 +#define RADEON_PARAM_IRQ_NR 5 +#define RADEON_PARAM_AGP_BASE 6 /* card offset of agp base */ + +typedef struct drm_radeon_getparam { + int param; + int *value; +} drm_radeon_getparam_t; + +/* 1.6: Set up a memory manager for regions of shared memory: + */ +#define RADEON_MEM_REGION_AGP 1 +#define RADEON_MEM_REGION_FB 2 + +typedef struct drm_radeon_mem_alloc { + int region; + int alignment; + int size; + int *region_offset; /* offset from start of fb or agp */ +} drm_radeon_mem_alloc_t; + +typedef struct drm_radeon_mem_free { + int region; + int region_offset; +} drm_radeon_mem_free_t; + +typedef struct drm_radeon_mem_init_heap { + int region; + int size; + int start; +} drm_radeon_mem_init_heap_t; + + +/* 1.6: Userspace can request & wait on irq's: + */ +typedef struct drm_radeon_irq_emit { + int *irq_seq; +} drm_radeon_irq_emit_t; + +typedef struct drm_radeon_irq_wait { + int irq_seq; +} drm_radeon_irq_wait_t; + + #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_drv.c linux.21pre4-ac2/drivers/char/drm/radeon_drv.c --- linux.21pre4/drivers/char/drm/radeon_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/radeon_drv.c 2003-01-06 17:28:08.000000000 +0000 @@ -30,47 +30,11 @@ #include #include "radeon.h" #include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" #include "radeon_drv.h" #include "ati_pcigart.h" -#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
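The drm_radeon_cmd_header_t union and RADEON_CMD_* opcodes added above define how a 1.3 client packs its command stream: each command begins with one dword whose first byte selects the opcode and whose remaining bytes carry per-command arguments. A hypothetical sketch of packing a buffer discard followed by a 3D wait (the buffer index is invented for the example; the numeric values mirror the defines above):

/* Sketch only: stand-in union with just the two members used here. */
typedef union {
        int i;
        struct { unsigned char cmd_type, buf_idx, pad0, pad1; } dma;
        struct { unsigned char cmd_type, flags, pad0, pad1; } wait;
} cmd_header_sketch_t;

static int pack_discard_and_wait(int *buf)
{
        cmd_header_sketch_t h;
        int n = 0;

        h.i = 0;
        h.dma.cmd_type = 4;             /* RADEON_CMD_DMA_DISCARD */
        h.dma.buf_idx = 3;              /* hypothetical DMA buffer index */
        buf[n++] = h.i;

        h.i = 0;
        h.wait.cmd_type = 8;            /* RADEON_CMD_WAIT */
        h.wait.flags = 0x2;             /* RADEON_WAIT_3D */
        buf[n++] = h.i;

        return n;                       /* dwords written */
}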
- -#define DRIVER_NAME "radeon" -#define DRIVER_DESC "ATI Radeon" -#define DRIVER_DATE "20010405" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ - [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, - - -#if 0 -/* GH: Count data sent to card via ring or vertex/indirect buffers. - */ -#define __HAVE_COUNTERS 3 -#define __HAVE_COUNTER6 _DRM_STAT_IRQ -#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY -#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY -#endif - - #include "drm_agpsupport.h" #include "drm_auth.h" #include "drm_bufs.h" @@ -78,26 +42,6 @@ #include "drm_dma.h" #include "drm_drawable.h" #include "drm_drv.h" - -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. - */ -static int __init radeon_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", radeon_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_drv.h linux.21pre4-ac2/drivers/char/drm/radeon_drv.h --- linux.21pre4/drivers/char/drm/radeon_drv.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/radeon_drv.h 2003-01-06 17:28:08.000000000 +0000 @@ -31,6 +31,9 @@ #ifndef __RADEON_DRV_H__ #define __RADEON_DRV_H__ +#define GET_RING_HEAD(ring) readl( (volatile u32 *) (ring)->head ) +#define SET_RING_HEAD(ring,val) writel( (val), (volatile u32 *) (ring)->head ) + typedef struct drm_radeon_freelist { unsigned int age; drm_buf_t *buf; @@ -58,6 +61,15 @@ u32 se_cntl; } drm_radeon_depth_clear_t; + +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + int start; + int size; + int pid; /* 0: free, -1: heap, other: real pids */ +}; + typedef struct drm_radeon_private { drm_radeon_ring_buffer_t ring; drm_radeon_sarea_t *sarea_priv; @@ -71,27 +83,32 @@ drm_radeon_freelist_t *head; drm_radeon_freelist_t *tail; -/* FIXME: ROTATE_BUFS is a hask to cycle through bufs until freelist - code is used. 
Note this hides a problem with the scratch register - (used to keep track of last buffer completed) being written to before - the last buffer has actually completed rendering. */ -#define ROTATE_BUFS 1 -#if ROTATE_BUFS int last_buf; -#endif volatile u32 *scratch; + int writeback_works; int usec_timeout; + + int is_r200; + int is_pci; unsigned long phys_pci_gart; dma_addr_t bus_pci_gart; - atomic_t idle_count; + struct { + u32 boxes; + int freelist_timeouts; + int freelist_loops; + int requested_bufs; + int last_frame_reads; + int last_clear_reads; + int clears; + int texture_uploads; + } stats; + int do_boxes; int page_flipping; int current_page; - u32 crtc_offset; - u32 crtc_offset_cntl; u32 color_fmt; unsigned int front_offset; @@ -116,14 +133,18 @@ drm_map_t *ring_rptr; drm_map_t *buffers; drm_map_t *agp_textures; + + struct mem_block *agp_heap; + struct mem_block *fb_heap; + + /* SW interrupt */ + wait_queue_head_t swi_queue; + atomic_t swi_emitted; + } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { u32 age; - int prim; - int discard; - int dispatched; - drm_radeon_freelist_t *list_entry; } drm_radeon_buf_priv_t; /* radeon_cp.c */ @@ -149,14 +170,6 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ); -static inline void -radeon_update_ring_snapshot( drm_radeon_ring_buffer_t *ring ) -{ - ring->space = (*(volatile int *)ring->head - ring->tail) * sizeof(u32); - if ( ring->space <= 0 ) - ring->space += ring->size; -} - extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); extern int radeon_do_cleanup_cp( drm_device_t *dev ); extern int radeon_do_cleanup_pageflip( drm_device_t *dev ); @@ -176,6 +189,34 @@ unsigned int cmd, unsigned long arg ); extern int radeon_cp_indirect( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ); +extern int radeon_cp_vertex2(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_cmdbuf(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_getparam(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_cp_flip(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); + +extern int radeon_mem_alloc(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_mem_free(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_mem_init_heap(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern void radeon_mem_takedown( struct mem_block **heap ); +extern void radeon_mem_release( struct mem_block *heap ); + + /* radeon_irq.c */ +extern int radeon_irq_emit(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); +extern int radeon_irq_wait(struct inode *inode, struct file *filp,unsigned int cmd, unsigned long arg ); + +extern int radeon_emit_and_wait_irq(drm_device_t *dev); +extern int radeon_wait_irq(drm_device_t *dev, int swi_nr); +extern int radeon_emit_irq(drm_device_t *dev); + + +/* Flags for stats.boxes + */ +#define RADEON_BOX_DMA_IDLE 0x1 +#define RADEON_BOX_RING_FULL 0x2 +#define RADEON_BOX_FLIP 0x4 +#define RADEON_BOX_WAIT_IDLE 0x8 +#define RADEON_BOX_TEXTURE_LOAD 0x10 + /* Register definitions, register access macros and drmAddMap constants @@ -202,10 +243,10 @@ #define RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) +#define RADEON_CRTC2_OFFSET 
0x0324 +#define RADEON_CRTC2_OFFSET_CNTL 0x0328 #define RADEON_RB3D_COLORPITCH 0x1c48 -#define RADEON_RB3D_DEPTHCLEARVALUE 0x1c30 -#define RADEON_RB3D_DEPTHXY_OFFSET 0x1c60 #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) @@ -240,6 +281,24 @@ #define RADEON_SCRATCH_UMSK 0x0770 #define RADEON_SCRATCH_ADDR 0x0774 +#define GET_SCRATCH( x ) (dev_priv->writeback_works \ + ? readl( &dev_priv->scratch[(x)] ) \ + : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) + + +#define RADEON_GEN_INT_CNTL 0x0040 +# define RADEON_CRTC_VBLANK_MASK (1 << 0) +# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19) +# define RADEON_SW_INT_ENABLE (1 << 25) + +#define RADEON_GEN_INT_STATUS 0x0044 +# define RADEON_CRTC_VBLANK_STAT (1 << 0) +# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) +# define RADEON_SW_INT_TEST (1 << 25) +# define RADEON_SW_INT_TEST_ACK (1 << 25) +# define RADEON_SW_INT_FIRE (1 << 26) + #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HDP_SOFT_RESET (1 << 26) # define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) @@ -253,6 +312,12 @@ # define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) # define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) +#define RADEON_RBBM_GUICNTL 0x172c +# define RADEON_HOST_DATA_SWAP_NONE (0 << 0) +# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) +# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) +# define RADEON_HOST_DATA_SWAP_HDW (3 << 0) + #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_FB_LOCATION 0x0148 #define RADEON_MCLK_CNTL 0x0012 @@ -290,10 +355,8 @@ # define RADEON_ROP_ENABLE (1 << 6) # define RADEON_STENCIL_ENABLE (1 << 7) # define RADEON_Z_ENABLE (1 << 8) -# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) -# define RADEON_ZBLOCK8 (0 << 15) -# define RADEON_ZBLOCK16 (1 << 15) #define RADEON_RB3D_DEPTHOFFSET 0x1c24 +#define RADEON_RB3D_DEPTHPITCH 0x1c28 #define RADEON_RB3D_PLANEMASK 0x1d84 #define RADEON_RB3D_STENCILREFMASK 0x1d7c #define RADEON_RB3D_ZCACHE_MODE 0x3250 @@ -306,9 +369,9 @@ # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) -# define RADEON_STENCIL_S_FAIL_KEEP (0 << 16) -# define RADEON_STENCIL_ZPASS_KEEP (0 << 20) -# define RADEON_STENCIL_ZFAIL_KEEP (0 << 20) +# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) +# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) +# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) # define RADEON_Z_WRITE_ENABLE (1 << 30) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) @@ -357,6 +420,16 @@ #define RADEON_SE_CNTL_STATUS 0x2140 #define RADEON_SE_LINE_WIDTH 0x1db8 #define RADEON_SE_VPORT_XSCALE 0x1d98 +#define RADEON_SE_ZBIAS_FACTOR 0x1db0 +#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 +#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 +#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 +# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 +# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 +#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 +#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 +# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 +#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C #define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 #define RADEON_SURFACE_ACCESS_CLR 0x0bfc #define RADEON_SURFACE_CNTL 0x0b00 @@ -421,6 +494,7 @@ #define RADEON_CP_RB_BASE 0x0700 #define RADEON_CP_RB_CNTL 0x0704 +# define RADEON_BUF_SWAP_32BIT (2 << 16) #define RADEON_CP_RB_RPTR_ADDR 0x070c #define RADEON_CP_RB_RPTR 0x0710 #define RADEON_CP_RB_WPTR 0x0714 @@ -457,11 +531,14 @@ #define RADEON_CP_PACKET3 0xC0000000 # define 
RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_WAIT_FOR_IDLE 0x00002600 +# define RADEON_3D_DRAW_VBUF 0x00002800 # define RADEON_3D_DRAW_IMMD 0x00002900 -# define RADEON_3D_CLEAR_ZMASK 0x00003200 +# define RADEON_3D_DRAW_INDX 0x00002A00 +# define RADEON_3D_LOAD_VBPNTR 0x00002F00 # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 # define RADEON_CNTL_PAINT_MULTI 0x00009A00 # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 +# define RADEON_CNTL_SET_SCISSORS 0xC0001E00 #define RADEON_CP_PACKET_MASK 0xC0000000 #define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 @@ -470,6 +547,7 @@ #define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 #define RADEON_VTX_Z_PRESENT (1 << 31) +#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) #define RADEON_PRIM_TYPE_NONE (0 << 0) #define RADEON_PRIM_TYPE_POINT (1 << 0) @@ -482,6 +560,7 @@ #define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) #define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) #define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) +#define RADEON_PRIM_TYPE_MASK 0xf #define RADEON_PRIM_WALK_IND (1 << 4) #define RADEON_PRIM_WALK_LIST (2 << 4) #define RADEON_PRIM_WALK_RING (3 << 4) @@ -508,6 +587,105 @@ #define RADEON_TXFORMAT_ARGB4444 5 #define RADEON_TXFORMAT_ARGB8888 6 #define RADEON_TXFORMAT_RGBA8888 7 +#define RADEON_TXFORMAT_VYUY422 10 +#define RADEON_TXFORMAT_YVYU422 11 +#define RADEON_TXFORMAT_DXT1 12 +#define RADEON_TXFORMAT_DXT23 14 +#define RADEON_TXFORMAT_DXT45 15 + +#define R200_PP_TXCBLEND_0 0x2f00 +#define R200_PP_TXCBLEND_1 0x2f10 +#define R200_PP_TXCBLEND_2 0x2f20 +#define R200_PP_TXCBLEND_3 0x2f30 +#define R200_PP_TXCBLEND_4 0x2f40 +#define R200_PP_TXCBLEND_5 0x2f50 +#define R200_PP_TXCBLEND_6 0x2f60 +#define R200_PP_TXCBLEND_7 0x2f70 +#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 +#define R200_PP_TFACTOR_0 0x2ee0 +#define R200_SE_VTX_FMT_0 0x2088 +#define R200_SE_VAP_CNTL 0x2080 +#define R200_SE_TCL_MATRIX_SEL_0 0x2230 +#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 +#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 +#define R200_PP_TXFILTER_5 0x2ca0 +#define R200_PP_TXFILTER_4 0x2c80 +#define R200_PP_TXFILTER_3 0x2c60 +#define R200_PP_TXFILTER_2 0x2c40 +#define R200_PP_TXFILTER_1 0x2c20 +#define R200_PP_TXFILTER_0 0x2c00 +#define R200_PP_TXOFFSET_5 0x2d78 +#define R200_PP_TXOFFSET_4 0x2d60 +#define R200_PP_TXOFFSET_3 0x2d48 +#define R200_PP_TXOFFSET_2 0x2d30 +#define R200_PP_TXOFFSET_1 0x2d18 +#define R200_PP_TXOFFSET_0 0x2d00 + +#define R200_PP_CUBIC_FACES_0 0x2c18 +#define R200_PP_CUBIC_FACES_1 0x2c38 +#define R200_PP_CUBIC_FACES_2 0x2c58 +#define R200_PP_CUBIC_FACES_3 0x2c78 +#define R200_PP_CUBIC_FACES_4 0x2c98 +#define R200_PP_CUBIC_FACES_5 0x2cb8 +#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 +#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 +#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c +#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 +#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 +#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c +#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 +#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 +#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 +#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c +#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 +#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 +#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c +#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 +#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 +#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c +#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 +#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 +#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 +#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c +#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 +#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 +#define 
R200_PP_CUBIC_OFFSET_F3_4 0x2d6c +#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 +#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 +#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c +#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 +#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 +#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 +#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c + +#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 +#define R200_SE_VTE_CNTL 0x20b0 +#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 +#define R200_PP_TAM_DEBUG3 0x2d9c +#define R200_PP_CNTL_X 0x2cc4 +#define R200_SE_VAP_CNTL_STATUS 0x2140 +#define R200_RE_SCISSOR_TL_0 0x1cd8 +#define R200_RE_SCISSOR_TL_1 0x1ce0 +#define R200_RE_SCISSOR_TL_2 0x1ce8 +#define R200_RB3D_DEPTHXY_OFFSET 0x1d60 +#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 +#define R200_SE_VTX_STATE_CNTL 0x2180 +#define R200_RE_POINTSIZE 0x2648 +#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 + + +#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 +#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 +#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 +#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 +#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 +#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 +#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 +#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b +#define R200_3D_DRAW_IMMD_2 0xC0003500 +#define R200_SE_VTX_FMT_1 0x208c +#define R200_RE_CNTL 0x1c50 + /* Constants */ #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -515,6 +693,7 @@ #define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 #define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 #define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 +#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3 #define RADEON_LAST_DISPATCH 1 #define RADEON_MAX_VB_AGE 0x7fffffff @@ -526,41 +705,11 @@ #define RADEON_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) #define RADEON_ADDR(reg) (RADEON_BASE( reg ) + reg) -#define RADEON_DEREF(reg) *(volatile u32 *)RADEON_ADDR( reg ) -#ifdef __alpha__ -#define RADEON_READ(reg) (_RADEON_READ((u32 *)RADEON_ADDR( reg ))) -static inline u32 _RADEON_READ(u32 *addr) -{ - mb(); - return *(volatile u32 *)addr; -} -#define RADEON_WRITE(reg,val) \ -do { \ - wmb(); \ - RADEON_DEREF(reg) = val; \ -} while (0) -#else -#define RADEON_READ(reg) RADEON_DEREF( reg ) -#define RADEON_WRITE(reg, val) do { RADEON_DEREF( reg ) = val; } while (0) -#endif +#define RADEON_READ(reg) readl( (volatile u32 *) RADEON_ADDR(reg) ) +#define RADEON_WRITE(reg,val) writel( (val), (volatile u32 *) RADEON_ADDR(reg)) -#define RADEON_DEREF8(reg) *(volatile u8 *)RADEON_ADDR( reg ) -#ifdef __alpha__ -#define RADEON_READ8(reg) _RADEON_READ8((u8 *)RADEON_ADDR( reg )) -static inline u8 _RADEON_READ8(u8 *addr) -{ - mb(); - return *(volatile u8 *)addr; -} -#define RADEON_WRITE8(reg,val) \ -do { \ - wmb(); \ - RADEON_DEREF8( reg ) = val; \ -} while (0) -#else -#define RADEON_READ8(reg) RADEON_DEREF8( reg ) -#define RADEON_WRITE8(reg, val) do { RADEON_DEREF8( reg ) = val; } while (0) -#endif +#define RADEON_READ8(reg) readb( (volatile u8 *) RADEON_ADDR(reg) ) +#define RADEON_WRITE8(reg,val) writeb( (val), (volatile u8 *) RADEON_ADDR(reg)) #define RADEON_WRITE_PLL( addr, val ) \ do { \ @@ -647,20 +796,16 @@ } \ } while (0) + +/* Perfbox functionality only. 
+ */ #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ do { \ - drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; \ - if ( ring->space < ring->high_mark ) { \ - for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ - radeon_update_ring_snapshot( ring ); \ - if ( ring->space >= ring->high_mark ) \ - goto __ring_space_done; \ - udelay( 1 ); \ - } \ - DRM_ERROR( "ring space check failed!\n" ); \ - return -EBUSY; \ + if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ + u32 head = GET_RING_HEAD(&dev_priv->ring); \ + if (head == dev_priv->ring.tail) \ + dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ } \ - __ring_space_done: ; \ } while (0) #define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ @@ -668,7 +813,7 @@ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ int __ret = radeon_do_cp_idle( dev_priv ); \ - if ( __ret < 0 ) return __ret; \ + if ( __ret ) return __ret; \ sarea_priv->last_dispatch = 0; \ radeon_freelist_reset( dev ); \ } \ @@ -694,12 +839,17 @@ * Ring control */ -#define radeon_flush_write_combine() mb() +#if defined(__powerpc__) +#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring ) +#else +#define radeon_flush_write_combine() wmb() +#warning PCI posting bug +#endif #define RADEON_VERBOSE 0 -#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring; +#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ @@ -707,9 +857,10 @@ n, __FUNCTION__ ); \ } \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ + COMMIT_RING(); \ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ } \ - dev_priv->ring.space -= (n) * sizeof(u32); \ + _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ write = dev_priv->ring.tail; \ mask = dev_priv->ring.tail_mask; \ @@ -720,9 +871,17 @@ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ write, dev_priv->ring.tail ); \ } \ - radeon_flush_write_combine(); \ - dev_priv->ring.tail = write; \ - RADEON_WRITE( RADEON_CP_RB_WPTR, write ); \ + if (((dev_priv->ring.tail + _nr) & mask) != write) { \ + DRM_ERROR( \ + "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ + ((dev_priv->ring.tail + _nr) & mask), \ + write, __LINE__); \ + } else \ + dev_priv->ring.tail = write; \ +} while (0) + +#define COMMIT_RING() do { \ + RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ } while (0) #define OUT_RING( x ) do { \ @@ -734,6 +893,33 @@ write &= mask; \ } while (0) -#define RADEON_PERFORMANCE_BOXES 0 +#define OUT_RING_REG( reg, val ) do { \ + OUT_RING( CP_PACKET0( reg, 0 ) ); \ + OUT_RING( val ); \ +} while (0) + + +#define OUT_RING_USER_TABLE( tab, sz ) do { \ + int _size = (sz); \ + int *_tab = (tab); \ + \ + if (write + _size > mask) { \ + int i = (mask+1) - write; \ + if (__copy_from_user( (int *)(ring+write), \ + _tab, i*4 )) \ + return -EFAULT; \ + write = 0; \ + _size -= i; \ + _tab += i; \ + } \ + \ + if (_size && __copy_from_user( (int *)(ring+write), \ + _tab, _size*4 )) \ + return -EFAULT; \ + \ + write += _size; \ + write &= mask; \ +} while (0) + #endif /* __RADEON_DRV_H__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon.h linux.21pre4-ac2/drivers/char/drm/radeon.h --- linux.21pre4/drivers/char/drm/radeon.h 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/radeon.h 2003-01-06 17:28:08.000000000 +0000 @@ -44,7 +44,78 @@ #define __HAVE_SG 1 #define __HAVE_PCI_DMA 1 -/* 
Driver customization: +#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." + +#define DRIVER_NAME "radeon" +#define DRIVER_DESC "ATI Radeon" +#define DRIVER_DATE "20020828" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 7 +#define DRIVER_PATCHLEVEL 0 + +/* Interface history: + * + * 1.1 - ?? + * 1.2 - Add vertex2 ioctl (keith) + * - Add stencil capability to clear ioctl (gareth, keith) + * - Increase MAX_TEXTURE_LEVELS (brian) + * 1.3 - Add cmdbuf ioctl (keith) + * - Add support for new radeon packets (keith) + * - Add getparam ioctl (keith) + * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). + * 1.4 - Add scratch registers to get_param ioctl. + * 1.5 - Add r200 packets to cmdbuf ioctl + * - Add r200 function to init ioctl + * - Add 'scalar2' instruction to cmdbuf + * 1.6 - Add static agp memory manager + * Add irq handler (won't be turned on unless X server knows to) + * Add irq ioctls and irq_active getparam. + * Add wait command for cmdbuf ioctl + * Add agp offset query for getparam + * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] + * and R200_PP_CUBIC_OFFSET_F1_[0..5]. + * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and + * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) + */ +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CMDBUF)] = { radeon_cp_cmdbuf, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_GETPARAM)] = { radeon_cp_getparam, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FREE)] = { radeon_mem_free, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \ + [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, + + +#define USE_IRQS 1 +#if USE_IRQS +#define __HAVE_DMA_IRQ 1 +#define __HAVE_VBL_IRQ 1 +#define __HAVE_SHARED_IRQ 1 + +/* When a client dies: + * - Check for and clean up flipped page state + * - Free any alloced agp memory. + * + * DRM infrastructure takes care of reclaiming dma buffers. 
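The interface history above adds the getparam ioctl in 1.3; a hypothetical userspace sketch of querying the AGP buffer offset with it. The open DRM fd and error handling are assumed, and the struct and ioctl number come from the radeon_drm.h changes earlier in this patch:

/* Sketch only: a 1.3+ client reading a value it cannot get elsewhere. */
#include <sys/ioctl.h>
#include "radeon_drm.h"         /* drm_radeon_getparam_t, DRM_IOCTL_RADEON_GETPARAM */

static int get_agp_buffer_offset(int fd, int *offset)
{
        drm_radeon_getparam_t gp;

        gp.param = RADEON_PARAM_AGP_BUFFER_OFFSET;
        gp.value = offset;      /* kernel writes the result through this pointer */
        return ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp);
}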
*/ #define DRIVER_PRERELEASE() do { \ if ( dev->dev_private ) { \ @@ -52,31 +123,36 @@ if ( dev_priv->page_flipping ) { \ radeon_do_cleanup_pageflip( dev ); \ } \ + radeon_mem_release( dev_priv->agp_heap ); \ } \ } while (0) +/* On unloading the module: + * - Free memory heap structure + * - Remove mappings made at startup and free dev_private. + */ #define DRIVER_PRETAKEDOWN() do { \ - if ( dev->dev_private ) radeon_do_cleanup_cp( dev ); \ + if ( dev->dev_private ) { \ + drm_radeon_private_t *dev_priv = dev->dev_private; \ + radeon_mem_takedown( &(dev_priv->agp_heap) ); \ + radeon_do_cleanup_cp( dev ); \ + } \ } while (0) +#else +#define __HAVE_DMA_IRQ 0 +#endif + /* DMA customization: */ #define __HAVE_DMA 1 -#if 0 -/* GH: Remove this for now... */ -#define __HAVE_DMA_QUIESCENT 1 -#define DRIVER_DMA_QUIESCENT() do { \ - drm_radeon_private_t *dev_priv = dev->dev_private; \ - return radeon_do_cp_idle( dev_priv ); \ -} while (0) -#endif /* Buffer customization: */ #define DRIVER_BUF_PRIV_T drm_radeon_buf_priv_t -#define DRIVER_AGP_BUFFERS_MAP( dev ) \ +#define DRIVER_AGP_BUFFERS_MAP( dev ) \ ((drm_radeon_private_t *)((dev)->dev_private))->buffers #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_irq.c linux.21pre4-ac2/drivers/char/drm/radeon_irq.c --- linux.21pre4/drivers/char/drm/radeon_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/drm/radeon_irq.c 2003-01-31 11:54:14.000000000 +0000 @@ -0,0 +1,256 @@ +/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- + * + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell + * Michel Dänzer + */ + +#include "radeon.h" +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" + +/* Interrupts - Used for device synchronization and flushing in the + * following circumstances: + * + * - Exclusive FB access with hw idle: + * - Wait for GUI Idle (?) interrupt, then do normal flush. + * + * - Frame throttling, NV_fence: + * - Drop marker irq's into command stream ahead of time. 
+ * - Wait on irq's with lock *not held* + * - Check each for termination condition + * + * - Internally in cp_getbuffer, etc: + * - as above, but wait with lock held??? + * + * NOTE: These functions are misleadingly named -- the irq's aren't + * tied to dma at all, this is just a hangover from dri prehistory. + */ + +void DRM(dma_service)(int irq, void *arg, struct pt_regs *reg) +{ + drm_device_t *dev = (drm_device_t *) arg; + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + u32 stat; + + stat = RADEON_READ(RADEON_GEN_INT_STATUS); + if (!stat) + return; + + /* SW interrupt */ + if (stat & RADEON_SW_INT_TEST) { + wake_up_interruptible( &dev_priv->swi_queue ); + } + + /* VBLANK interrupt */ + if (stat & RADEON_CRTC_VBLANK_STAT) { + atomic_inc(&dev->vbl_received); + wake_up_interruptible(&dev->vbl_queue); + DRM(vbl_send_signals)(dev); + } + + /* Acknowledge all the bits in GEN_INT_STATUS -- seem to get + * more than we asked for... + */ + RADEON_WRITE(RADEON_GEN_INT_STATUS, stat); +} + +static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv) +{ + u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS ); + if (tmp) + RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp ); +} + +int radeon_emit_irq(drm_device_t *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + unsigned int ret; + RING_LOCALS; + + atomic_inc(&dev_priv->swi_emitted); + ret = atomic_read(&dev_priv->swi_emitted); + + BEGIN_RING( 4 ); + OUT_RING_REG( RADEON_LAST_SWI_REG, ret ); + OUT_RING_REG( RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE ); + ADVANCE_RING(); + COMMIT_RING(); + + return ret; +} + + +int radeon_wait_irq(drm_device_t *dev, int swi_nr) +{ + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + int ret = 0; + + if (RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr) + return 0; + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + /* This is a hack to work around mysterious freezes on certain + * systems: + */ + radeon_acknowledge_irqs( dev_priv ); + + DRM_WAIT_ON( ret, dev_priv->swi_queue, 3 * HZ, + RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr ); + + return ret; +} + +int radeon_emit_and_wait_irq(drm_device_t *dev) +{ + return radeon_wait_irq( dev, radeon_emit_irq(dev) ); +} + + +int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence) +{ + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + unsigned int cur_vblank; + int ret = 0; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + radeon_acknowledge_irqs( dev_priv ); + + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + + /* Assume that the user has missed the current sequence number + * by about a day rather than she wants to wait for years + * using vertical blanks... + */ + DRM_WAIT_ON( ret, dev->vbl_queue, 3*HZ, + ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) ) + - ~*sequence + 1 ) <= (1<<23) ) ); + + *sequence = cur_vblank; + + return ret; +} + + +/* Needs the lock as it touches the ring. 
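The emit/wait pair above is a simple fence: radeon_emit_irq() pushes a monotonically increasing sequence number through the ring (stored to RADEON_LAST_SWI_REG, then RADEON_SW_INT_FIRE), and radeon_wait_irq() sleeps until the scratch register catches up. A stand-alone sketch of the pattern with stand-in names; "completed" substitutes for the scratch register and the DRM_WAIT_ON sleep is reduced to a predicate:

/* Sketch only: sequence-number fencing. */
static unsigned int emitted;            /* protected by the hardware lock */
static volatile unsigned int completed; /* advanced by the CP / irq handler */

static unsigned int fence_emit(void)
{
        return ++emitted;       /* the driver also emits this number into the ring */
}

static int fence_passed(unsigned int seq)
{
        return completed >= seq; /* RADEON_READ( RADEON_LAST_SWI_REG ) >= swi_nr */
}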
+ */ +int radeon_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_irq_emit_t emit; + int result; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data, + sizeof(emit) ); + + result = radeon_emit_irq( dev ); + + if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + +/* Doesn't need the hardware lock. + */ +int radeon_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_irq_wait_t irqwait; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data, + sizeof(irqwait) ); + + return radeon_wait_irq( dev, irqwait.irq_seq ); +} + + +/* drm_dma.h hooks +*/ +void DRM(driver_irq_preinstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + + /* Disable *all* interrupts */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); + + /* Clear bits if they're already high */ + radeon_acknowledge_irqs( dev_priv ); +} + +void DRM(driver_irq_postinstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + + atomic_set(&dev_priv->swi_emitted, 0); + init_waitqueue_head( &dev_priv->swi_queue ); + + /* Turn on SW and VBL ints */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, + RADEON_CRTC_VBLANK_MASK | + RADEON_SW_INT_ENABLE ); +} + +void DRM(driver_irq_uninstall)( drm_device_t *dev ) { + drm_radeon_private_t *dev_priv = + (drm_radeon_private_t *)dev->dev_private; + if ( dev_priv ) { + /* Disable *all* interrupts */ + RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); + } +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_mem.c linux.21pre4-ac2/drivers/char/drm/radeon_mem.c --- linux.21pre4/drivers/char/drm/radeon_mem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/drm/radeon_mem.c 2003-01-06 17:28:08.000000000 +0000 @@ -0,0 +1,338 @@ +/* radeon_mem.c -- Simple agp/fb memory manager for radeon -*- linux-c -*- + * + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell + */ + +#include "radeon.h" +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" + +/* Very simple allocator for agp memory, working on a static range + * already mapped into each client's address space. + */ + +static struct mem_block *split_block(struct mem_block *p, int start, int size, + int pid ) +{ + /* Maybe cut off the start of an existing block */ + if (start > p->start) { + struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start; + newblock->size = p->size - (start - p->start); + newblock->pid = 0; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size -= newblock->size; + p = newblock; + } + + /* Maybe cut off the end of an existing block */ + if (size < p->size) { + struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); + if (!newblock) + goto out; + newblock->start = start + size; + newblock->size = p->size - size; + newblock->pid = 0; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size = size; + } + + out: + /* Our block is in the middle */ + p->pid = pid; + return p; +} + +static struct mem_block *alloc_block( struct mem_block *heap, int size, + int align2, int pid ) +{ + struct mem_block *p; + int mask = (1 << align2)-1; + + for (p = heap->next ; p != heap ; p = p->next) { + int start = (p->start + mask) & ~mask; + if (p->pid == 0 && start + size <= p->start + p->size) + return split_block( p, start, size, pid ); + } + + return NULL; +} + +static struct mem_block *find_block( struct mem_block *heap, int start ) +{ + struct mem_block *p; + + for (p = heap->next ; p != heap ; p = p->next) + if (p->start == start) + return p; + + return NULL; +} + + +static void free_block( struct mem_block *p ) +{ + p->pid = 0; + + /* Assumes a single contiguous range. Needs a special pid in + * 'heap' to stop it being subsumed. + */ + if (p->next->pid == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + + if (p->prev->pid == 0) { + struct mem_block *q = p->prev; + q->size += p->size; + q->next = p->next; + q->next->prev = q; + kfree(p); + } +} + +static void print_heap( struct mem_block *heap ) +{ + struct mem_block *p; + + for (p = heap->next ; p != heap ; p = p->next) + DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n", + p->start, p->start + p->size, + p->size, p->pid); +} + +/* Initialize. How to check for an uninitialized heap? 
+ */ +static int init_heap(struct mem_block **heap, int start, int size) +{ + struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); + + if (!blocks) + return -ENOMEM; + + *heap = kmalloc(sizeof(**heap), GFP_KERNEL); + if (!*heap) { + kfree( blocks ); + return -ENOMEM; + } + + blocks->start = start; + blocks->size = size; + blocks->pid = 0; + blocks->next = blocks->prev = *heap; + + memset( *heap, 0, sizeof(**heap) ); + (*heap)->pid = -1; + (*heap)->next = (*heap)->prev = blocks; + return 0; +} + + +/* Free all blocks associated with the releasing pid. + */ +void radeon_mem_release( struct mem_block *heap ) +{ + int pid = current->pid; + struct mem_block *p; + + if (!heap || !heap->next) + return; + + for (p = heap->next ; p != heap ; p = p->next) { + if (p->pid == pid) + p->pid = 0; + } + + /* Assumes a single contiguous range. Needs a special pid in + * 'heap' to stop it being subsumed. + */ + for (p = heap->next ; p != heap ; p = p->next) { + while (p->pid == 0 && p->next->pid == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + kfree(q); + } + } +} + +/* Shutdown. + */ +void radeon_mem_takedown( struct mem_block **heap ) +{ + struct mem_block *p; + + if (!*heap) + return; + + for (p = (*heap)->next ; p != *heap ; ) { + struct mem_block *q = p; + p = p->next; + kfree(q); + } + + kfree( *heap ); + *heap = 0; +} + + + +/* IOCTL HANDLERS */ + +static struct mem_block **get_heap( drm_radeon_private_t *dev_priv, + int region ) +{ + switch( region ) { + case RADEON_MEM_REGION_AGP: + return &dev_priv->agp_heap; + case RADEON_MEM_REGION_FB: + return &dev_priv->fb_heap; + default: + return 0; + } +} + +int radeon_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_alloc_t alloc; + struct mem_block *block, **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data, + sizeof(alloc) ); + + heap = get_heap( dev_priv, alloc.region ); + if (!heap || !*heap) + return -EFAULT; + + /* Make things easier on ourselves: all allocations at least + * 4k aligned. 
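alloc_block() above takes the alignment as a power-of-two exponent (the ioctl handler below clamps it to at least 12, i.e. 4k) and rounds each candidate start up with a mask. A short sketch of that align-up step, with an invented value:

/* Sketch only: the (start + mask) & ~mask rounding from alloc_block(). */
static int align_up(int start, int align2)
{
        int mask = (1 << align2) - 1;
        return (start + mask) & ~mask;
}
/* e.g. align_up(0x1234, 12) == 0x2000 -- offsets rounded up to 4k */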
+ */ + if (alloc.alignment < 12) + alloc.alignment = 12; + + block = alloc_block( *heap, alloc.size, alloc.alignment, + current->pid ); + + if (!block) + return -ENOMEM; + + if ( copy_to_user( alloc.region_offset, &block->start, + sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + + return 0; +} + + + +int radeon_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_free_t memfree; + struct mem_block *block, **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data, + sizeof(memfree) ); + + heap = get_heap( dev_priv, memfree.region ); + if (!heap || !*heap) + return -EFAULT; + + block = find_block( *heap, memfree.region_offset ); + if (!block) + return -EFAULT; + + if (block->pid != current->pid) + return -EPERM; + + free_block( block ); + return 0; +} + +int radeon_mem_init_heap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_mem_init_heap_t initheap; + struct mem_block **heap; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data, + sizeof(initheap) ); + + heap = get_heap( dev_priv, initheap.region ); + if (!heap) + return -EFAULT; + + if (*heap) { + DRM_ERROR("heap already initialized?"); + return -EFAULT; + } + + return init_heap( heap, initheap.start, initheap.size ); +} + + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/radeon_state.c linux.21pre4-ac2/drivers/char/drm/radeon_state.c --- linux.21pre4/drivers/char/drm/radeon_state.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/radeon_state.c 2003-01-06 17:28:08.000000000 +0000 @@ -29,10 +29,11 @@ #include "radeon.h" #include "drmP.h" -#include "radeon_drv.h" #include "drm.h" -#include - +#include "drm_sarea.h" +#include "radeon_drm.h" +#include "radeon_drv.h" +#include "drm_os_linux.h" /* ================================================================ * CP hardware state programming functions @@ -47,360 +48,254 @@ box->x1, box->y1, box->x2, box->y2 ); BEGIN_RING( 4 ); - OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) ); OUT_RING( (box->y1 << 16) | box->x1 ); - OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) ); OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_context( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 14 ); - - OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); - OUT_RING( ctx->pp_misc ); - OUT_RING( ctx->pp_fog_color ); - OUT_RING( ctx->re_solid_color ); - OUT_RING( ctx->rb3d_blendcntl ); - OUT_RING( ctx->rb3d_depthoffset ); - OUT_RING( ctx->rb3d_depthpitch ); - OUT_RING( ctx->rb3d_zstencilcntl ); - - OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); - OUT_RING( ctx->pp_cntl ); - OUT_RING( ctx->rb3d_cntl ); - OUT_RING( ctx->rb3d_coloroffset ); - - OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) 
); - OUT_RING( ctx->rb3d_colorpitch ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_vertfmt( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 2 ); - - OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); - OUT_RING( ctx->se_coord_fmt ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_line( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 5 ); - - OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); - OUT_RING( ctx->re_line_pattern ); - OUT_RING( ctx->re_line_state ); - - OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); - OUT_RING( ctx->se_line_width ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_bumpmap( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 5 ); - - OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); - OUT_RING( ctx->pp_lum_matrix ); - - OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); - OUT_RING( ctx->pp_rot_matrix_0 ); - OUT_RING( ctx->pp_rot_matrix_1 ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_masks( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 4 ); - - OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); - OUT_RING( ctx->rb3d_stencilrefmask ); - OUT_RING( ctx->rb3d_ropcntl ); - OUT_RING( ctx->rb3d_planemask ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 7 ); - - OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); - OUT_RING( ctx->se_vport_xscale ); - OUT_RING( ctx->se_vport_xoffset ); - OUT_RING( ctx->se_vport_yscale ); - OUT_RING( ctx->se_vport_yoffset ); - OUT_RING( ctx->se_vport_zscale ); - OUT_RING( ctx->se_vport_zoffset ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_setup( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 4 ); - - OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); - OUT_RING( ctx->se_cntl ); - OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); - OUT_RING( ctx->se_cntl_status ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tcl( drm_radeon_private_t *dev_priv ) -{ -#ifdef TCL_ENABLE - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 29 ); - - OUT_RING( CP_PACKET0( RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 27 ) ); - OUT_RING( ctx->se_tcl_material_emmissive.red ); - OUT_RING( ctx->se_tcl_material_emmissive.green ); - OUT_RING( ctx->se_tcl_material_emmissive.blue ); - OUT_RING( ctx->se_tcl_material_emmissive.alpha ); - OUT_RING( 
ctx->se_tcl_material_ambient.red ); - OUT_RING( ctx->se_tcl_material_ambient.green ); - OUT_RING( ctx->se_tcl_material_ambient.blue ); - OUT_RING( ctx->se_tcl_material_ambient.alpha ); - OUT_RING( ctx->se_tcl_material_diffuse.red ); - OUT_RING( ctx->se_tcl_material_diffuse.green ); - OUT_RING( ctx->se_tcl_material_diffuse.blue ); - OUT_RING( ctx->se_tcl_material_diffuse.alpha ); - OUT_RING( ctx->se_tcl_material_specular.red ); - OUT_RING( ctx->se_tcl_material_specular.green ); - OUT_RING( ctx->se_tcl_material_specular.blue ); - OUT_RING( ctx->se_tcl_material_specular.alpha ); - OUT_RING( ctx->se_tcl_shininess ); - OUT_RING( ctx->se_tcl_output_vtx_fmt ); - OUT_RING( ctx->se_tcl_output_vtx_sel ); - OUT_RING( ctx->se_tcl_matrix_select_0 ); - OUT_RING( ctx->se_tcl_matrix_select_1 ); - OUT_RING( ctx->se_tcl_ucp_vert_blend_ctl ); - OUT_RING( ctx->se_tcl_texture_proc_ctl ); - OUT_RING( ctx->se_tcl_light_model_ctl ); - for ( i = 0 ; i < 4 ; i++ ) { - OUT_RING( ctx->se_tcl_per_light_ctl[i] ); - } - - ADVANCE_RING(); -#else - DRM_ERROR( "TCL not enabled!\n" ); -#endif -} - -static inline void radeon_emit_misc( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_context_regs_t *ctx = &sarea_priv->context_state; - RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 2 ); - - OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); - OUT_RING( ctx->re_misc ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex0( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[0]; - RING_LOCALS; - DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); - OUT_RING( tex->pp_border_color ); - ADVANCE_RING(); } -static inline void radeon_emit_tex1( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[1]; - RING_LOCALS; - DRM_DEBUG( " %s: offset=0x%x\n", __FUNCTION__, tex->pp_txoffset ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); - OUT_RING( tex->pp_border_color ); - - ADVANCE_RING(); -} - -static inline void radeon_emit_tex2( drm_radeon_private_t *dev_priv ) +/* Emit 1.1 state + */ +static void radeon_emit_state( drm_radeon_private_t *dev_priv, + drm_radeon_context_regs_t *ctx, + drm_radeon_texture_regs_t *tex, + unsigned int dirty ) { - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_texture_regs_t *tex = &sarea_priv->tex_state[2]; RING_LOCALS; - DRM_DEBUG( " %s\n", __FUNCTION__ ); - - BEGIN_RING( 9 ); - - OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); - OUT_RING( tex->pp_txfilter ); - OUT_RING( tex->pp_txformat ); - OUT_RING( tex->pp_txoffset ); - OUT_RING( tex->pp_txcblend ); - OUT_RING( tex->pp_txablend ); - OUT_RING( tex->pp_tfactor ); - - OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); - OUT_RING( tex->pp_border_color 
); - - ADVANCE_RING(); -} - -static inline void radeon_emit_state( drm_radeon_private_t *dev_priv ) -{ - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - unsigned int dirty = sarea_priv->dirty; - - DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty ); + DRM_DEBUG( "dirty=0x%08x\n", dirty ); if ( dirty & RADEON_UPLOAD_CONTEXT ) { - radeon_emit_context( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_CONTEXT; + BEGIN_RING( 14 ); + OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) ); + OUT_RING( ctx->pp_misc ); + OUT_RING( ctx->pp_fog_color ); + OUT_RING( ctx->re_solid_color ); + OUT_RING( ctx->rb3d_blendcntl ); + OUT_RING( ctx->rb3d_depthoffset ); + OUT_RING( ctx->rb3d_depthpitch ); + OUT_RING( ctx->rb3d_zstencilcntl ); + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) ); + OUT_RING( ctx->pp_cntl ); + OUT_RING( ctx->rb3d_cntl ); + OUT_RING( ctx->rb3d_coloroffset ); + OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) ); + OUT_RING( ctx->rb3d_colorpitch ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_VERTFMT ) { - radeon_emit_vertfmt( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_VERTFMT; + BEGIN_RING( 2 ); + OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) ); + OUT_RING( ctx->se_coord_fmt ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_LINE ) { - radeon_emit_line( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_LINE; + BEGIN_RING( 5 ); + OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) ); + OUT_RING( ctx->re_line_pattern ); + OUT_RING( ctx->re_line_state ); + OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) ); + OUT_RING( ctx->se_line_width ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_BUMPMAP ) { - radeon_emit_bumpmap( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_BUMPMAP; + BEGIN_RING( 5 ); + OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) ); + OUT_RING( ctx->pp_lum_matrix ); + OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) ); + OUT_RING( ctx->pp_rot_matrix_0 ); + OUT_RING( ctx->pp_rot_matrix_1 ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MASKS ) { - radeon_emit_masks( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_MASKS; + BEGIN_RING( 4 ); + OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) ); + OUT_RING( ctx->rb3d_stencilrefmask ); + OUT_RING( ctx->rb3d_ropcntl ); + OUT_RING( ctx->rb3d_planemask ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_VIEWPORT ) { - radeon_emit_viewport( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_VIEWPORT; + BEGIN_RING( 7 ); + OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) ); + OUT_RING( ctx->se_vport_xscale ); + OUT_RING( ctx->se_vport_xoffset ); + OUT_RING( ctx->se_vport_yscale ); + OUT_RING( ctx->se_vport_yoffset ); + OUT_RING( ctx->se_vport_zscale ); + OUT_RING( ctx->se_vport_zoffset ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_SETUP ) { - radeon_emit_setup( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_SETUP; - } - - if ( dirty & RADEON_UPLOAD_TCL ) { -#ifdef TCL_ENABLE - radeon_emit_tcl( dev_priv ); -#endif - sarea_priv->dirty &= ~RADEON_UPLOAD_TCL; + BEGIN_RING( 4 ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); + OUT_RING( ctx->se_cntl ); + OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) ); + OUT_RING( ctx->se_cntl_status ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_MISC ) { - radeon_emit_misc( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_MISC; + BEGIN_RING( 2 ); + OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) ); + OUT_RING( ctx->re_misc ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX0 ) { - radeon_emit_tex0( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX0; + BEGIN_RING( 9 ); + 
OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) ); + OUT_RING( tex[0].pp_txfilter ); + OUT_RING( tex[0].pp_txformat ); + OUT_RING( tex[0].pp_txoffset ); + OUT_RING( tex[0].pp_txcblend ); + OUT_RING( tex[0].pp_txablend ); + OUT_RING( tex[0].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) ); + OUT_RING( tex[0].pp_border_color ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX1 ) { - radeon_emit_tex1( dev_priv ); - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX1; + BEGIN_RING( 9 ); + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) ); + OUT_RING( tex[1].pp_txfilter ); + OUT_RING( tex[1].pp_txformat ); + OUT_RING( tex[1].pp_txoffset ); + OUT_RING( tex[1].pp_txcblend ); + OUT_RING( tex[1].pp_txablend ); + OUT_RING( tex[1].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) ); + OUT_RING( tex[1].pp_border_color ); + ADVANCE_RING(); } if ( dirty & RADEON_UPLOAD_TEX2 ) { -#if 0 - radeon_emit_tex2( dev_priv ); -#endif - sarea_priv->dirty &= ~RADEON_UPLOAD_TEX2; + BEGIN_RING( 9 ); + OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) ); + OUT_RING( tex[2].pp_txfilter ); + OUT_RING( tex[2].pp_txformat ); + OUT_RING( tex[2].pp_txoffset ); + OUT_RING( tex[2].pp_txcblend ); + OUT_RING( tex[2].pp_txablend ); + OUT_RING( tex[2].pp_tfactor ); + OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) ); + OUT_RING( tex[2].pp_border_color ); + ADVANCE_RING(); + } +} + +/* Emit 1.2 state + */ +static void radeon_emit_state2( drm_radeon_private_t *dev_priv, + drm_radeon_state_t *state ) +{ + RING_LOCALS; + + if (state->dirty & RADEON_UPLOAD_ZBIAS) { + BEGIN_RING( 3 ); + OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) ); + OUT_RING( state->context2.se_zbias_factor ); + OUT_RING( state->context2.se_zbias_constant ); + ADVANCE_RING(); } - sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | - RADEON_UPLOAD_TEX1IMAGES | - RADEON_UPLOAD_TEX2IMAGES | - RADEON_REQUIRE_QUIESCENCE); + radeon_emit_state( dev_priv, &state->context, + state->tex, state->dirty ); } +/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in + * 1.3 cmdbuffers allow all previous state to be updated as well as + * the tcl scalar and vector areas. 
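+ * For example, a RADEON_CMD_PACKET header whose packet_id selects the
+ * { RADEON_PP_MISC, 7 } entry in the table below is expanded by
+ * radeon_emit_packets() into CP_PACKET0( RADEON_PP_MISC, 6 ) followed
+ * by seven register values copied from the user-supplied command stream.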
+ */ +static struct { + int start; + int len; + const char *name; +} packet[RADEON_MAX_STATE_PACKETS] = { + { RADEON_PP_MISC,7,"RADEON_PP_MISC" }, + { RADEON_PP_CNTL,3,"RADEON_PP_CNTL" }, + { RADEON_RB3D_COLORPITCH,1,"RADEON_RB3D_COLORPITCH" }, + { RADEON_RE_LINE_PATTERN,2,"RADEON_RE_LINE_PATTERN" }, + { RADEON_SE_LINE_WIDTH,1,"RADEON_SE_LINE_WIDTH" }, + { RADEON_PP_LUM_MATRIX,1,"RADEON_PP_LUM_MATRIX" }, + { RADEON_PP_ROT_MATRIX_0,2,"RADEON_PP_ROT_MATRIX_0" }, + { RADEON_RB3D_STENCILREFMASK,3,"RADEON_RB3D_STENCILREFMASK" }, + { RADEON_SE_VPORT_XSCALE,6,"RADEON_SE_VPORT_XSCALE" }, + { RADEON_SE_CNTL,2,"RADEON_SE_CNTL" }, + { RADEON_SE_CNTL_STATUS,1,"RADEON_SE_CNTL_STATUS" }, + { RADEON_RE_MISC,1,"RADEON_RE_MISC" }, + { RADEON_PP_TXFILTER_0,6,"RADEON_PP_TXFILTER_0" }, + { RADEON_PP_BORDER_COLOR_0,1,"RADEON_PP_BORDER_COLOR_0" }, + { RADEON_PP_TXFILTER_1,6,"RADEON_PP_TXFILTER_1" }, + { RADEON_PP_BORDER_COLOR_1,1,"RADEON_PP_BORDER_COLOR_1" }, + { RADEON_PP_TXFILTER_2,6,"RADEON_PP_TXFILTER_2" }, + { RADEON_PP_BORDER_COLOR_2,1,"RADEON_PP_BORDER_COLOR_2" }, + { RADEON_SE_ZBIAS_FACTOR,2,"RADEON_SE_ZBIAS_FACTOR" }, + { RADEON_SE_TCL_OUTPUT_VTX_FMT,11,"RADEON_SE_TCL_OUTPUT_VTX_FMT" }, + { RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED,17,"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" }, + { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" }, + { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" }, + { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" }, + { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" }, + { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" }, + { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" }, + { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" }, + { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" }, + { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" }, + { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" }, + { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" }, + { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" }, + { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" }, + { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" }, + { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" }, + { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" }, + { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" }, + { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" }, + { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" }, + { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" }, + { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" }, + { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" }, + { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" }, + { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" }, + { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" }, + { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" }, + { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" }, + { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" }, + { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" }, + { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" }, + { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" }, + { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" }, + { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" }, + { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" }, + { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" }, + { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" }, + { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" }, + { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" }, + { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" }, + { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" }, + { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */ + { 
R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */ + { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" }, + { R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" }, + { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" }, + { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" }, + { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" }, + { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" }, + { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" }, + { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" }, + { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" }, + { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" }, +}; + + -#if RADEON_PERFORMANCE_BOXES /* ================================================================ * Performance monitoring functions */ @@ -409,10 +304,12 @@ int x, int y, int w, int h, int r, int g, int b ) { - u32 pitch, offset; u32 color; RING_LOCALS; + x += dev_priv->sarea_priv->boxes[0].x1; + y += dev_priv->sarea_priv->boxes[0].y1; + switch ( dev_priv->color_fmt ) { case RADEON_COLOR_FORMAT_RGB565: color = (((r & 0xf8) << 8) | @@ -425,8 +322,11 @@ break; } - offset = dev_priv->back_offset; - pitch = dev_priv->back_pitch >> 3; + BEGIN_RING( 4 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( 0xffffffff ); + ADVANCE_RING(); BEGIN_RING( 6 ); @@ -438,7 +338,12 @@ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS ); - OUT_RING( (pitch << 22) | (offset >> 5) ); + if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { + OUT_RING( dev_priv->front_pitch_offset ); + } else { + OUT_RING( dev_priv->back_pitch_offset ); + } + OUT_RING( color ); OUT_RING( (x << 16) | y ); @@ -449,53 +354,77 @@ static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv ) { - if ( atomic_read( &dev_priv->idle_count ) == 0 ) { - radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); - } else { - atomic_set( &dev_priv->idle_count, 0 ); + /* Collapse various things into a wait flag -- trying to + * guess if userspase slept -- better just to have them tell us. + */ + if (dev_priv->stats.last_frame_reads > 1 || + dev_priv->stats.last_clear_reads > dev_priv->stats.clears) { + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; } -} -#endif + if (dev_priv->stats.freelist_loops) { + dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; + } + /* Purple box for page flipping + */ + if ( dev_priv->stats.boxes & RADEON_BOX_FLIP ) + radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 ); + /* Red box if we have to wait for idle at any point + */ + if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE ) + radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 ); + + /* Blue box: lost context? 
+ */ + + /* Yellow box for texture swaps + */ + if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD ) + radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 ); + + /* Green box if hardware never idles (as far as we can tell) + */ + if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) ) + radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 ); + + + /* Draw bars indicating number of buffers allocated + * (not a great measure, easily confused) + */ + if (dev_priv->stats.requested_bufs) { + if (dev_priv->stats.requested_bufs > 100) + dev_priv->stats.requested_bufs = 100; + + radeon_clear_box( dev_priv, 4, 16, + dev_priv->stats.requested_bufs, 4, + 196, 128, 128 ); + } + + memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) ); + +} /* ================================================================ * CP command dispatch functions */ -static void radeon_print_dirty( const char *msg, unsigned int flags ) -{ - DRM_DEBUG( "%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", - msg, - flags, - (flags & RADEON_UPLOAD_CONTEXT) ? "context, " : "", - (flags & RADEON_UPLOAD_VERTFMT) ? "vertfmt, " : "", - (flags & RADEON_UPLOAD_LINE) ? "line, " : "", - (flags & RADEON_UPLOAD_BUMPMAP) ? "bumpmap, " : "", - (flags & RADEON_UPLOAD_MASKS) ? "masks, " : "", - (flags & RADEON_UPLOAD_VIEWPORT) ? "viewport, " : "", - (flags & RADEON_UPLOAD_SETUP) ? "setup, " : "", - (flags & RADEON_UPLOAD_TCL) ? "tcl, " : "", - (flags & RADEON_UPLOAD_MISC) ? "misc, " : "", - (flags & RADEON_UPLOAD_TEX0) ? "tex0, " : "", - (flags & RADEON_UPLOAD_TEX1) ? "tex1, " : "", - (flags & RADEON_UPLOAD_TEX2) ? "tex2, " : "", - (flags & RADEON_UPLOAD_CLIPRECTS) ? "cliprects, " : "", - (flags & RADEON_REQUIRE_QUIESCENCE) ? "quiescence, " : "" ); -} - static void radeon_cp_dispatch_clear( drm_device_t *dev, drm_radeon_clear_t *clear, drm_radeon_clear_rect_t *depth_boxes ) { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; int nbox = sarea_priv->nbox; drm_clip_rect_t *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; + u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "flags = 0x%x\n", flags ); + + dev_priv->stats.clears++; if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { unsigned int tmp = flags; @@ -505,127 +434,277 @@ if ( tmp & RADEON_BACK ) flags |= RADEON_FRONT; } - for ( i = 0 ; i < nbox ; i++ ) { - int x = pbox[i].x1; - int y = pbox[i].y1; - int w = pbox[i].x2 - x; - int h = pbox[i].y2 - y; + if ( flags & (RADEON_FRONT | RADEON_BACK) ) { - DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", - x, y, w, h, flags ); + BEGIN_RING( 4 ); - if ( flags & (RADEON_FRONT | RADEON_BACK) ) { - BEGIN_RING( 4 ); + /* Ensure the 3D stream is idle before doing a + * 2D fill to clear the front or back buffer. + */ + RADEON_WAIT_UNTIL_3D_IDLE(); + + OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); + OUT_RING( clear->color_mask ); - /* Ensure the 3D stream is idle before doing a - * 2D fill to clear the front or back buffer. - */ - RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); - OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) ); - OUT_RING( clear->color_mask ); + /* Make sure we restore the 3D state next time. 
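+ * Clearing sarea_priv->ctx_owner below marks the 3D context as owned
+ * by no one, so clients know their state must be re-emitted.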
+ */ + dev_priv->sarea_priv->ctx_owner = 0; - ADVANCE_RING(); + for ( i = 0 ; i < nbox ; i++ ) { + int x = pbox[i].x1; + int y = pbox[i].y1; + int w = pbox[i].x2 - x; + int h = pbox[i].y2 - y; + + DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n", + x, y, w, h, flags ); + + if ( flags & RADEON_FRONT ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( clear->clear_color ); + + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } + + if ( flags & RADEON_BACK ) { + BEGIN_RING( 6 ); + + OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); + OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | + RADEON_GMC_BRUSH_SOLID_COLOR | + (dev_priv->color_fmt << 8) | + RADEON_GMC_SRC_DATATYPE_COLOR | + RADEON_ROP3_P | + RADEON_GMC_CLR_CMP_CNTL_DIS ); + + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( clear->clear_color ); - /* Make sure we restore the 3D state next time. - */ - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_MASKS); + OUT_RING( (x << 16) | y ); + OUT_RING( (w << 16) | h ); + + ADVANCE_RING(); + } } + } - if ( flags & RADEON_FRONT ) { - BEGIN_RING( 6 ); + /* We have to clear the depth and/or stencil buffers by + * rendering a quad into just those buffers. Thus, we have to + * make sure the 3D engine is configured correctly. + */ + if ( dev_priv->is_r200 && + (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) { - OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); - OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | - RADEON_GMC_BRUSH_SOLID_COLOR | - (dev_priv->color_fmt << 8) | - RADEON_GMC_SRC_DATATYPE_COLOR | - RADEON_ROP3_P | - RADEON_GMC_CLR_CMP_CNTL_DIS ); + int tempPP_CNTL; + int tempRE_CNTL; + int tempRB3D_CNTL; + int tempRB3D_ZSTENCILCNTL; + int tempRB3D_STENCILREFMASK; + int tempRB3D_PLANEMASK; + int tempSE_CNTL; + int tempSE_VTE_CNTL; + int tempSE_VTX_FMT_0; + int tempSE_VTX_FMT_1; + int tempSE_VAP_CNTL; + int tempRE_AUX_SCISSOR_CNTL; - OUT_RING( dev_priv->front_pitch_offset ); - OUT_RING( clear->clear_color ); + tempPP_CNTL = 0; + tempRE_CNTL = 0; - OUT_RING( (x << 16) | y ); - OUT_RING( (w << 16) | h ); + tempRB3D_CNTL = depth_clear->rb3d_cntl; + tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */ - ADVANCE_RING(); + tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; + tempRB3D_STENCILREFMASK = 0x0; + + tempSE_CNTL = depth_clear->se_cntl; + + + + /* Disable TCL */ + + tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */ + (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT)); + + tempRB3D_PLANEMASK = 0x0; + + tempRE_AUX_SCISSOR_CNTL = 0x0; + + tempSE_VTE_CNTL = + SE_VTE_CNTL__VTX_XY_FMT_MASK | + SE_VTE_CNTL__VTX_Z_FMT_MASK; + + /* Vertex format (X, Y, Z, W)*/ + tempSE_VTX_FMT_0 = + SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK | + SE_VTX_FMT_0__VTX_W0_PRESENT_MASK; + tempSE_VTX_FMT_1 = 0x0; + + + /* + * Depth buffer specific enables + */ + if (flags & RADEON_DEPTH) { + /* Enable depth buffer */ + tempRB3D_CNTL |= RADEON_Z_ENABLE; + } else { + /* Disable depth buffer */ + tempRB3D_CNTL &= ~RADEON_Z_ENABLE; } - if ( flags & RADEON_BACK ) { - BEGIN_RING( 6 ); + /* + * Stencil buffer specific enables + */ + if ( flags & RADEON_STENCIL ) { + tempRB3D_CNTL |= RADEON_STENCIL_ENABLE; + tempRB3D_STENCILREFMASK = clear->depth_mask; + } else { + tempRB3D_CNTL &= 
~RADEON_STENCIL_ENABLE; + tempRB3D_STENCILREFMASK = 0x00000000; + } - OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) ); - OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL | - RADEON_GMC_BRUSH_SOLID_COLOR | - (dev_priv->color_fmt << 8) | - RADEON_GMC_SRC_DATATYPE_COLOR | - RADEON_ROP3_P | - RADEON_GMC_CLR_CMP_CNTL_DIS ); + BEGIN_RING( 26 ); + RADEON_WAIT_UNTIL_2D_IDLE(); - OUT_RING( dev_priv->back_pitch_offset ); - OUT_RING( clear->clear_color ); + OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL ); + OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL ); + OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL ); + OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, + tempRB3D_ZSTENCILCNTL ); + OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, + tempRB3D_STENCILREFMASK ); + OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK ); + OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL ); + OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL ); + OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 ); + OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 ); + OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL ); + OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL, + tempRE_AUX_SCISSOR_CNTL ); + ADVANCE_RING(); - OUT_RING( (x << 16) | y ); - OUT_RING( (w << 16) | h ); + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->ctx_owner = 0; - ADVANCE_RING(); + for ( i = 0 ; i < nbox ; i++ ) { + + /* Funny that this should be required -- + * sets top-left? + */ + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); + BEGIN_RING( 14 ); + OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) ); + OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | + RADEON_PRIM_WALK_RING | + (3 << RADEON_NUM_VERTICES_SHIFT)) ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + OUT_RING( depth_boxes[i].ui[CLEAR_X2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); + OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x3f800000 ); + ADVANCE_RING(); } + } + else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) { + + rb3d_cntl = depth_clear->rb3d_cntl; if ( flags & RADEON_DEPTH ) { - drm_radeon_depth_clear_t *depth_clear = - &dev_priv->depth_clear; + rb3d_cntl |= RADEON_Z_ENABLE; + } else { + rb3d_cntl &= ~RADEON_Z_ENABLE; + } - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); - } + if ( flags & RADEON_STENCIL ) { + rb3d_cntl |= RADEON_STENCIL_ENABLE; + rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */ + } else { + rb3d_cntl &= ~RADEON_STENCIL_ENABLE; + rb3d_stencilrefmask = 0x00000000; + } - /* FIXME: Render a rectangle to clear the depth - * buffer. So much for those "fast Z clears"... 
- */ - BEGIN_RING( 23 ); + BEGIN_RING( 13 ); + RADEON_WAIT_UNTIL_2D_IDLE(); - RADEON_WAIT_UNTIL_2D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); + OUT_RING( 0x00000000 ); + OUT_RING( rb3d_cntl ); + + OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL, + depth_clear->rb3d_zstencilcntl ); + OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, + rb3d_stencilrefmask ); + OUT_RING_REG( RADEON_RB3D_PLANEMASK, + 0x00000000 ); + OUT_RING_REG( RADEON_SE_CNTL, + depth_clear->se_cntl ); + ADVANCE_RING(); - OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) ); - OUT_RING( 0x00000000 ); - OUT_RING( depth_clear->rb3d_cntl ); - OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) ); - OUT_RING( depth_clear->rb3d_zstencilcntl ); - OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) ); - OUT_RING( 0x00000000 ); - OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) ); - OUT_RING( depth_clear->se_cntl ); + /* Make sure we restore the 3D state next time. + */ + dev_priv->sarea_priv->ctx_owner = 0; - OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) ); - OUT_RING( RADEON_VTX_Z_PRESENT ); + for ( i = 0 ; i < nbox ; i++ ) { + + /* Funny that this should be required -- + * sets top-left? + */ + radeon_emit_clip_rect( dev_priv, + &sarea_priv->boxes[i] ); + + BEGIN_RING( 15 ); + + OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) ); + OUT_RING( RADEON_VTX_Z_PRESENT | + RADEON_VTX_PKCOLOR_PRESENT); OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST | RADEON_PRIM_WALK_RING | RADEON_MAOS_ENABLE | RADEON_VTX_FMT_RADEON_MODE | (3 << RADEON_NUM_VERTICES_SHIFT)) ); + OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y1] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); OUT_RING( depth_boxes[i].ui[CLEAR_X1] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); OUT_RING( depth_boxes[i].ui[CLEAR_X2] ); OUT_RING( depth_boxes[i].ui[CLEAR_Y2] ); OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] ); + OUT_RING( 0x0 ); ADVANCE_RING(); - - /* Make sure we restore the 3D state next time. - */ - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_SETUP | - RADEON_UPLOAD_MASKS); } } @@ -651,13 +730,13 @@ drm_clip_rect_t *pbox = sarea_priv->boxes; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); -#if RADEON_PERFORMANCE_BOXES /* Do some trivial performance monitoring... */ - radeon_cp_performance_boxes( dev_priv ); -#endif + if (dev_priv->do_boxes) + radeon_cp_performance_boxes( dev_priv ); + /* Wait for the 3D stream to idle before dispatching the bitblt. * This will prevent data corruption between the two streams. @@ -689,9 +768,17 @@ RADEON_DP_SRC_SOURCE_MEMORY | RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS ); - - OUT_RING( dev_priv->back_pitch_offset ); - OUT_RING( dev_priv->front_pitch_offset ); + + /* Make this work even if front & back are flipped: + */ + if (dev_priv->current_page == 0) { + OUT_RING( dev_priv->back_pitch_offset ); + OUT_RING( dev_priv->front_pitch_offset ); + } + else { + OUT_RING( dev_priv->front_pitch_offset ); + OUT_RING( dev_priv->back_pitch_offset ); + } OUT_RING( (x << 16) | y ); OUT_RING( (x << 16) | y ); @@ -717,29 +804,33 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev ) { drm_radeon_private_t *dev_priv = dev->dev_private; - RING_LOCALS; - DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page ); + drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle; + int offset = (dev_priv->current_page == 1) + ? 
dev_priv->front_offset : dev_priv->back_offset; + RING_LOCALS; + DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", + __FUNCTION__, + dev_priv->current_page, + dev_priv->sarea_priv->pfCurrentPage); -#if RADEON_PERFORMANCE_BOXES /* Do some trivial performance monitoring... */ - radeon_cp_performance_boxes( dev_priv ); -#endif + if (dev_priv->do_boxes) { + dev_priv->stats.boxes |= RADEON_BOX_FLIP; + radeon_cp_performance_boxes( dev_priv ); + } + /* Update the frame offsets for both CRTCs + */ BEGIN_RING( 6 ); RADEON_WAIT_UNTIL_3D_IDLE(); - RADEON_WAIT_UNTIL_PAGE_FLIPPED(); - - OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET, 0 ) ); - - if ( dev_priv->current_page == 0 ) { - OUT_RING( dev_priv->back_offset ); - dev_priv->current_page = 1; - } else { - OUT_RING( dev_priv->front_offset ); - dev_priv->current_page = 0; - } + OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch + + sarea->frame.x + * ( dev_priv->color_fmt - 2 ) ) & ~7 ) + + offset ); + OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base + + offset ); ADVANCE_RING(); @@ -748,6 +839,8 @@ * performing the swapbuffer ioctl. */ dev_priv->sarea_priv->last_frame++; + dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page = + 1 - dev_priv->current_page; BEGIN_RING( 2 ); @@ -756,82 +849,118 @@ ADVANCE_RING(); } +static int bad_prim_vertex_nr( int primitive, int nr ) +{ + switch (primitive & RADEON_PRIM_TYPE_MASK) { + case RADEON_PRIM_TYPE_NONE: + case RADEON_PRIM_TYPE_POINT: + return nr < 1; + case RADEON_PRIM_TYPE_LINE: + return (nr & 1) || nr == 0; + case RADEON_PRIM_TYPE_LINE_STRIP: + return nr < 2; + case RADEON_PRIM_TYPE_TRI_LIST: + case RADEON_PRIM_TYPE_3VRT_POINT_LIST: + case RADEON_PRIM_TYPE_3VRT_LINE_LIST: + case RADEON_PRIM_TYPE_RECT_LIST: + return nr % 3 || nr == 0; + case RADEON_PRIM_TYPE_TRI_FAN: + case RADEON_PRIM_TYPE_TRI_STRIP: + return nr < 3; + default: + return 1; + } +} + + + +typedef struct { + unsigned int start; + unsigned int finish; + unsigned int prim; + unsigned int numverts; + unsigned int offset; + unsigned int vc_format; +} drm_radeon_tcl_prim_t; + static void radeon_cp_dispatch_vertex( drm_device_t *dev, - drm_buf_t *buf ) + drm_buf_t *buf, + drm_radeon_tcl_prim_t *prim, + drm_clip_rect_t *boxes, + int nbox ) + { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - int format = sarea_priv->vc_format; - int offset = dev_priv->agp_buffers_offset + buf->offset; - int size = buf->used; - int prim = buf_priv->prim; + drm_clip_rect_t box; + int offset = dev_priv->agp_buffers_offset + buf->offset + prim->start; + int numverts = (int)prim->numverts; int i = 0; RING_LOCALS; - DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox ); - - if ( 0 ) - radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty ); - if ( buf->used ) { - buf_priv->dispatched = 1; + DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", + prim->prim, + prim->vc_format, + prim->start, + prim->finish, + prim->numverts); + + if (bad_prim_vertex_nr( prim->prim, prim->numverts )) { + DRM_ERROR( "bad prim %x numverts %d\n", + prim->prim, prim->numverts ); + return; + } + + do { + /* Emit the next cliprect */ + if ( i < nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return; - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv ); + radeon_emit_clip_rect( dev_priv, &box ); } - do { - /* Emit the next set of up to three cliprects */ - if ( i < 
sarea_priv->nbox ) { - radeon_emit_clip_rect( dev_priv, - &sarea_priv->boxes[i] ); - } + /* Emit the vertex buffer rendering commands */ + BEGIN_RING( 5 ); - /* Emit the vertex buffer rendering commands */ - BEGIN_RING( 5 ); + OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) ); + OUT_RING( offset ); + OUT_RING( numverts ); + OUT_RING( prim->vc_format ); + OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (numverts << RADEON_NUM_VERTICES_SHIFT) ); - OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) ); - OUT_RING( offset ); - OUT_RING( size ); - OUT_RING( format ); - OUT_RING( prim | RADEON_PRIM_WALK_LIST | - RADEON_COLOR_ORDER_RGBA | - RADEON_VTX_FMT_RADEON_MODE | - (size << RADEON_NUM_VERTICES_SHIFT) ); + ADVANCE_RING(); - ADVANCE_RING(); + i++; + } while ( i < nbox ); +} - i++; - } while ( i < sarea_priv->nbox ); - } - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - /* Emit the vertex buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); +static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_buf_priv_t *buf_priv = buf->dev_private; + RING_LOCALS; - buf->pending = 1; - buf->used = 0; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } + buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; - dev_priv->sarea_priv->last_dispatch++; + /* Emit the vertex buffer age */ + BEGIN_RING( 2 ); + RADEON_DISPATCH_AGE( buf_priv->age ); + ADVANCE_RING(); - sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; - sarea_priv->nbox = 0; + buf->pending = 1; + buf->used = 0; } - static void radeon_cp_dispatch_indirect( drm_device_t *dev, drm_buf_t *buf, int start, int end ) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; RING_LOCALS; DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end ); @@ -852,8 +981,6 @@ data[dwords++] = RADEON_CP_PACKET2; } - buf_priv->dispatched = 1; - /* Fire off the indirect buffer */ BEGIN_RING( 3 ); @@ -863,100 +990,75 @@ ADVANCE_RING(); } - - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - - /* Emit the indirect buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); - - buf->pending = 1; - buf->used = 0; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } - - dev_priv->sarea_priv->last_dispatch++; } + static void radeon_cp_dispatch_indices( drm_device_t *dev, - drm_buf_t *buf, - int start, int end, - int count ) + drm_buf_t *elt_buf, + drm_radeon_tcl_prim_t *prim, + drm_clip_rect_t *boxes, + int nbox ) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv = buf->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - int format = sarea_priv->vc_format; - int offset = dev_priv->agp_buffers_offset; - int prim = buf_priv->prim; + drm_clip_rect_t box; + int offset = dev_priv->agp_buffers_offset + prim->offset; u32 *data; int dwords; int i = 0; - RING_LOCALS; - DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count ); - - if ( 0 ) - radeon_print_dirty( "dispatch_indices", sarea_priv->dirty ); - - if ( start != end ) { - buf_priv->dispatched = 1; + int start = prim->start + RADEON_INDEX_PRIM_OFFSET; + int count = (prim->finish - start) / sizeof(u16); - if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { - radeon_emit_state( dev_priv 
); - } - - dwords = (end - start + 3) / sizeof(u32); - - data = (u32 *)((char *)dev_priv->buffers->handle - + buf->offset + start); - - data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); - - data[1] = offset; - data[2] = RADEON_MAX_VB_VERTS; - data[3] = format; - data[4] = (prim | RADEON_PRIM_WALK_IND | - RADEON_COLOR_ORDER_RGBA | - RADEON_VTX_FMT_RADEON_MODE | - (count << RADEON_NUM_VERTICES_SHIFT) ); - - if ( count & 0x1 ) { - data[dwords-1] &= 0x0000ffff; - } - - do { - /* Emit the next set of up to three cliprects */ - if ( i < sarea_priv->nbox ) { - radeon_emit_clip_rect( dev_priv, - &sarea_priv->boxes[i] ); - } - - radeon_cp_dispatch_indirect( dev, buf, start, end ); - - i++; - } while ( i < sarea_priv->nbox ); + DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", + prim->prim, + prim->vc_format, + prim->start, + prim->finish, + prim->offset, + prim->numverts); + + if (bad_prim_vertex_nr( prim->prim, count )) { + DRM_ERROR( "bad prim %x count %d\n", + prim->prim, count ); + return; } - if ( buf_priv->discard ) { - buf_priv->age = dev_priv->sarea_priv->last_dispatch; - /* Emit the vertex buffer age */ - BEGIN_RING( 2 ); - RADEON_DISPATCH_AGE( buf_priv->age ); - ADVANCE_RING(); + if ( start >= prim->finish || + (prim->start & 0x7) ) { + DRM_ERROR( "buffer prim %d\n", prim->prim ); + return; + } + + dwords = (prim->finish - prim->start + 3) / sizeof(u32); + + data = (u32 *)((char *)dev_priv->buffers->handle + + elt_buf->offset + prim->start); + + data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 ); + data[1] = offset; + data[2] = prim->numverts; + data[3] = prim->vc_format; + data[4] = (prim->prim | + RADEON_PRIM_WALK_IND | + RADEON_COLOR_ORDER_RGBA | + RADEON_VTX_FMT_RADEON_MODE | + (count << RADEON_NUM_VERTICES_SHIFT) ); + + do { + if ( i < nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return; + + radeon_emit_clip_rect( dev_priv, &box ); + } - buf->pending = 1; - /* FIXME: Check dispatched field */ - buf_priv->dispatched = 0; - } + radeon_cp_dispatch_indirect( dev, elt_buf, + prim->start, + prim->finish ); - dev_priv->sarea_priv->last_dispatch++; + i++; + } while ( i < nbox ); - sarea_priv->dirty &= ~RADEON_UPLOAD_CLIPRECTS; - sarea_priv->nbox = 0; } #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) @@ -967,15 +1069,16 @@ { drm_radeon_private_t *dev_priv = dev->dev_private; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; u32 format; u32 *buffer; - u8 *data; + const u8 *data; int size, dwords, tex_width, blit_width; u32 y, height; int ret = 0, i; RING_LOCALS; + dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; + /* FIXME: Be smarter about this... */ buf = radeon_freelist_get( dev ); @@ -985,8 +1088,6 @@ tex->offset >> 10, tex->pitch, tex->format, image->x, image->y, image->width, image->height ); - buf_priv = buf->dev_private; - /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. Thus, we'll * use a shift instead. 
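A quick illustration of the shift trick mentioned in the comment above. The sketch below is standalone and uses hypothetical helper names, not driver code: when a divisor such as blit_width is guaranteed to be a power of two, its log2 can be derived once and every subsequent division replaced by a right shift, the transformation the compiler cannot make on its own for a value only known to be a power of two at run time.

#include <assert.h>

/* Derive log2(n) for a power-of-two n.  Hypothetical helper, shown only
 * to illustrate the comment above.
 */
static unsigned int log2_of_pow2(unsigned int n)
{
        unsigned int shift = 0;

        assert(n != 0 && (n & (n - 1)) == 0);   /* n must be a power of two */
        while ((1u << shift) < n)
                shift++;
        return shift;
}

/* Equivalent of "bytes / blit_width", but with the division folded into
 * a shift that is computed once.
 */
static unsigned int rows_that_fit(unsigned int bytes, unsigned int blit_width)
{
        unsigned int shift = log2_of_pow2(blit_width);

        return bytes >> shift;
}
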
@@ -1002,6 +1103,8 @@ case RADEON_TXFORMAT_ARGB1555: case RADEON_TXFORMAT_RGB565: case RADEON_TXFORMAT_ARGB4444: + case RADEON_TXFORMAT_VYUY422: + case RADEON_TXFORMAT_YVYU422: format = RADEON_COLOR_FORMAT_RGB565; tex_width = tex->width * 2; blit_width = image->width * 2; @@ -1031,12 +1134,22 @@ ADVANCE_RING(); +#ifdef __BIG_ENDIAN + /* The Mesa texture functions provide the data in little endian as the + * chip wants it, but we need to compensate for the fact that the CP + * ring gets byte-swapped + */ + BEGIN_RING( 2 ); + OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT ); + ADVANCE_RING(); +#endif + /* Make a copy of the parameters in case we have to update them * for a multi-pass texture blit. */ y = image->y; height = image->height; - data = (u8 *)image->data; + data = (const u8 *)image->data; size = height * blit_width; @@ -1051,7 +1164,7 @@ /* Update the input parameters for next time */ image->y += height; image->height -= height; - image->data = (char *)image->data + size; + image->data = (const char *)image->data + size; if ( copy_to_user( tex->image, image, sizeof(*image) ) ) { DRM_ERROR( "EFAULT on tex->image\n" ); @@ -1112,9 +1225,9 @@ buf->pid = current->pid; buf->used = (dwords + 8) * sizeof(u32); - buf_priv->discard = 1; radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); + radeon_cp_discard_buffer( dev, buf ); /* Flush the pixel cache after the blit completes. This ensures * the texture data is written out to memory before rendering @@ -1130,12 +1243,13 @@ return ret; } + static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple ) { drm_radeon_private_t *dev_priv = dev->dev_private; int i; RING_LOCALS; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); BEGIN_RING( 35 ); @@ -1158,31 +1272,95 @@ int radeon_cp_clear( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_clear_t clear; + drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; + DRM_DEBUG( "\n" ); + + LOCK_TEST_WITH_RETURN( dev ); + + if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg, + sizeof(clear) ) ) + return -EFAULT; + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + + if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + + if ( copy_from_user( &depth_boxes, clear.depth_boxes, + sarea_priv->nbox * sizeof(depth_boxes[0]) ) ) + return -EFAULT; + + radeon_cp_dispatch_clear( dev, &clear, depth_boxes ); + + COMMIT_RING(); + return 0; +} + + +/* Not sure why this isn't set all the time: + */ +static int radeon_do_init_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG( "\n" ); + + BEGIN_RING( 6 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) ); + OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL ); + OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) ); + OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL ); + ADVANCE_RING(); + + dev_priv->page_flipping = 1; + dev_priv->current_page = 0; + dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page; + + return 0; +} + +/* Called whenever a client dies, from DRM(release). 
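+ * If the client died while the back buffer page was being displayed,
+ * one more flip is dispatched so page 0 (the front buffer) is visible
+ * again before page flipping is disabled.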
+ * NOTE: Lock isn't necessarily held when this is called! + */ +int radeon_do_cleanup_pageflip( drm_device_t *dev ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + DRM_DEBUG( "\n" ); + + if (dev_priv->current_page != 0) + radeon_cp_dispatch_flip( dev ); + + dev_priv->page_flipping = 0; + return 0; +} + +/* Swapping and flipping are different operations, need different ioctls. + * They can & should be intermixed to support multiple 3d windows. + */ +int radeon_cp_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_radeon_clear_t clear; - drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev ); - if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg, - sizeof(clear) ) ) - return -EFAULT; - RING_SPACE_TEST_WITH_RETURN( dev_priv ); - if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) - sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - - if ( copy_from_user( &depth_boxes, clear.depth_boxes, - sarea_priv->nbox * sizeof(depth_boxes[0]) ) ) - return -EFAULT; - - radeon_cp_dispatch_clear( dev, &clear, depth_boxes ); + if (!dev_priv->page_flipping) + radeon_do_init_pageflip( dev ); + + radeon_cp_dispatch_flip( dev ); + COMMIT_RING(); return 0; } @@ -1193,7 +1371,7 @@ drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; - DRM_DEBUG( "%s\n", __FUNCTION__ ); + DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev ); @@ -1202,14 +1380,10 @@ if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS ) sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - if ( !dev_priv->page_flipping ) { - radeon_cp_dispatch_swap( dev ); - dev_priv->sarea_priv->dirty |= (RADEON_UPLOAD_CONTEXT | - RADEON_UPLOAD_MASKS); - } else { - radeon_cp_dispatch_flip( dev ); - } + radeon_cp_dispatch_swap( dev ); + dev_priv->sarea_priv->ctx_owner = 0; + COMMIT_RING(); return 0; } @@ -1219,10 +1393,11 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_vertex_t vertex; + drm_radeon_tcl_prim_t prim; LOCK_TEST_WITH_RETURN( dev ); @@ -1235,8 +1410,8 @@ sizeof(vertex) ) ) return -EFAULT; - DRM_DEBUG( "%s: pid=%d index=%d count=%d discard=%d\n", - __FUNCTION__, current->pid, + DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n", + current->pid, vertex.idx, vertex.count, vertex.discard ); if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { @@ -1254,7 +1429,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf = dma->buflist[vertex.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1266,12 +1440,39 @@ return -EINVAL; } - buf->used = vertex.count; - buf_priv->prim = vertex.prim; - buf_priv->discard = vertex.discard; + /* Build up a prim_t record: + */ + if (vertex.count) { + buf->used = vertex.count; /* not used? 
*/ + + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv, + &sarea_priv->context_state, + sarea_priv->tex_state, + sarea_priv->dirty ); + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); + } + + prim.start = 0; + prim.finish = vertex.count; /* unused */ + prim.prim = vertex.prim; + prim.numverts = vertex.count; + prim.vc_format = dev_priv->sarea_priv->vc_format; + + radeon_cp_dispatch_vertex( dev, buf, &prim, + dev_priv->sarea_priv->boxes, + dev_priv->sarea_priv->nbox ); + } - radeon_cp_dispatch_vertex( dev, buf ); + if (vertex.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + COMMIT_RING(); return 0; } @@ -1281,10 +1482,11 @@ drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_indices_t elts; + drm_radeon_tcl_prim_t prim; int count; LOCK_TEST_WITH_RETURN( dev ); @@ -1317,7 +1519,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf = dma->buflist[elts.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1342,11 +1543,37 @@ } buf->used = elts.end; - buf_priv->prim = elts.prim; - buf_priv->discard = elts.discard; - radeon_cp_dispatch_indices( dev, buf, elts.start, elts.end, count ); + if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) { + radeon_emit_state( dev_priv, + &sarea_priv->context_state, + sarea_priv->tex_state, + sarea_priv->dirty ); + + sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | + RADEON_UPLOAD_TEX1IMAGES | + RADEON_UPLOAD_TEX2IMAGES | + RADEON_REQUIRE_QUIESCENCE); + } + + + /* Build up a prim_t record: + */ + prim.start = elts.start; + prim.finish = elts.end; + prim.prim = elts.prim; + prim.offset = 0; /* offset from start of dma buffers */ + prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + prim.vc_format = dev_priv->sarea_priv->vc_format; + + radeon_cp_dispatch_indices( dev, buf, &prim, + dev_priv->sarea_priv->boxes, + dev_priv->sarea_priv->nbox ); + if (elts.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + COMMIT_RING(); return 0; } @@ -1358,6 +1585,7 @@ drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_texture_t tex; drm_radeon_tex_image_t image; + int ret; LOCK_TEST_WITH_RETURN( dev ); @@ -1377,7 +1605,10 @@ RING_SPACE_TEST_WITH_RETURN( dev_priv ); VB_AGE_TEST_WITH_RETURN( dev_priv ); - return radeon_cp_dispatch_texture( dev, &tex, &image ); + ret = radeon_cp_dispatch_texture( dev, &tex, &image ); + + COMMIT_RING(); + return ret; } int radeon_cp_stipple( struct inode *inode, struct file *filp, @@ -1402,6 +1633,7 @@ radeon_cp_dispatch_stipple( dev, mask ); + COMMIT_RING(); return 0; } @@ -1413,7 +1645,6 @@ drm_radeon_private_t *dev_priv = dev->dev_private; drm_device_dma_t *dma = dev->dma; drm_buf_t *buf; - drm_radeon_buf_priv_t *buf_priv; drm_radeon_indirect_t indirect; RING_LOCALS; @@ -1439,7 +1670,6 @@ } buf = dma->buflist[indirect.idx]; - buf_priv = buf->dev_private; if ( buf->pid != current->pid ) { DRM_ERROR( "process %d using buffer owned by %d\n", @@ -1461,7 +1691,6 @@ VB_AGE_TEST_WITH_RETURN( dev_priv ); buf->used = indirect.end; - buf_priv->discard = indirect.discard; /* Wait for the 3D stream to idle before the indirect buffer * containing 2D acceleration commands is processed. @@ -1477,6 +1706,526 @@ * privileged clients. 
*/ radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end ); + if (indirect.discard) { + radeon_cp_discard_buffer( dev, buf ); + } + + + COMMIT_RING(); + return 0; +} + +int radeon_cp_vertex2(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf; + drm_radeon_vertex2_t vertex; + int i; + unsigned char laststate; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data, + sizeof(vertex) ); + + DRM_DEBUG( "pid=%d index=%d discard=%d\n", + current->pid, + vertex.idx, vertex.discard ); + + if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + vertex.idx, dma->buf_count - 1 ); + return -EINVAL; + } + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + VB_AGE_TEST_WITH_RETURN( dev_priv ); + + buf = dma->buflist[vertex.idx]; + + if ( buf->pid != current->pid ) { + DRM_ERROR( "process %d using buffer owned by %d\n", + current->pid, buf->pid ); + return -EINVAL; + } + + if ( buf->pending ) { + DRM_ERROR( "sending pending buffer %d\n", vertex.idx ); + return -EINVAL; + } + + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + return -EINVAL; + + for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) { + drm_radeon_prim_t prim; + drm_radeon_tcl_prim_t tclprim; + + if ( copy_from_user( &prim, &vertex.prim[i], sizeof(prim) ) ) + return -EFAULT; + + if ( prim.stateidx != laststate ) { + drm_radeon_state_t state; + + if ( copy_from_user( &state, + &vertex.state[prim.stateidx], + sizeof(state) ) ) + return -EFAULT; + + radeon_emit_state2( dev_priv, &state ); + + laststate = prim.stateidx; + } + + tclprim.start = prim.start; + tclprim.finish = prim.finish; + tclprim.prim = prim.prim; + tclprim.vc_format = prim.vc_format; + + if ( prim.prim & RADEON_PRIM_WALK_IND ) { + tclprim.offset = prim.numverts * 64; + tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ + + radeon_cp_dispatch_indices( dev, buf, &tclprim, + sarea_priv->boxes, + sarea_priv->nbox); + } else { + tclprim.numverts = prim.numverts; + tclprim.offset = 0; /* not used */ + + radeon_cp_dispatch_vertex( dev, buf, &tclprim, + sarea_priv->boxes, + sarea_priv->nbox); + } + + if (sarea_priv->nbox == 1) + sarea_priv->nbox = 0; + } + + if ( vertex.discard ) { + radeon_cp_discard_buffer( dev, buf ); + } + + COMMIT_RING(); + return 0; +} + + +static int radeon_emit_packets( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int id = (int)header.packet.packet_id; + int sz, reg; + int *data = (int *)cmdbuf->buf; + RING_LOCALS; + + if (id >= RADEON_MAX_STATE_PACKETS) + return -EINVAL; + + sz = packet[id].len; + reg = packet[id].start; + + if (sz * sizeof(int) > cmdbuf->bufsz) + return -EINVAL; + + BEGIN_RING(sz+1); + OUT_RING( CP_PACKET0( reg, (sz-1) ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +static __inline__ int radeon_emit_scalars( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.scalars.count; + int *data = (int *)cmdbuf->buf; + int start = 
header.scalars.offset; + int stride = header.scalars.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +/* God this is ugly + */ +static __inline__ int radeon_emit_scalars2( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.scalars.count; + int *data = (int *)cmdbuf->buf; + int start = ((unsigned int)header.scalars.offset) + 0x100; + int stride = header.scalars.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + +static __inline__ int radeon_emit_vectors( + drm_radeon_private_t *dev_priv, + drm_radeon_cmd_header_t header, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + int sz = header.vectors.count; + int *data = (int *)cmdbuf->buf; + int start = header.vectors.offset; + int stride = header.vectors.stride; + RING_LOCALS; + + BEGIN_RING( 3+sz ); + OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) ); + OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); + OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) ); + OUT_RING_USER_TABLE( data, sz ); + ADVANCE_RING(); + + cmdbuf->buf += sz * sizeof(int); + cmdbuf->bufsz -= sz * sizeof(int); + return 0; +} + + +static int radeon_emit_packet3( drm_device_t *dev, + drm_radeon_cmd_buffer_t *cmdbuf ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + int cmdsz, tmp; + int *cmd = (int *)cmdbuf->buf; + RING_LOCALS; + + DRM_DEBUG("\n"); + + if (__get_user( tmp, &cmd[0])) + return -EFAULT; + + cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16); + + if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 || + cmdsz * 4 > cmdbuf->bufsz) + return -EINVAL; + + BEGIN_RING( cmdsz ); + OUT_RING_USER_TABLE( cmd, cmdsz ); + ADVANCE_RING(); + + cmdbuf->buf += cmdsz * 4; + cmdbuf->bufsz -= cmdsz * 4; + return 0; +} + + +static int radeon_emit_packet3_cliprect( drm_device_t *dev, + drm_radeon_cmd_buffer_t *cmdbuf, + int orig_nbox ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_clip_rect_t box; + int cmdsz, tmp; + int *cmd = (int *)cmdbuf->buf; + drm_clip_rect_t *boxes = cmdbuf->boxes; + int i = 0; + RING_LOCALS; + + DRM_DEBUG("\n"); + + if (__get_user( tmp, &cmd[0])) + return -EFAULT; + + cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16); + + if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 || + cmdsz * 4 > cmdbuf->bufsz) + return -EINVAL; + + if (!orig_nbox) + goto out; + + do { + if ( i < cmdbuf->nbox ) { + if (__copy_from_user( &box, &boxes[i], sizeof(box) )) + return -EFAULT; + /* FIXME The second and subsequent times round + * this loop, send a WAIT_UNTIL_3D_IDLE before + * calling emit_clip_rect(). This fixes a + * lockup on fast machines when sending + * several cliprects with a cmdbuf, as when + * waving a 2D window over a 3D + * window. Something in the commands from user + * space seems to hang the card when they're + * sent several times in a row. 
That would be + * the correct place to fix it but this works + * around it until I can figure that out - Tim + * Smith */ + if ( i ) { + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); + } + radeon_emit_clip_rect( dev_priv, &box ); + } + + BEGIN_RING( cmdsz ); + OUT_RING_USER_TABLE( cmd, cmdsz ); + ADVANCE_RING(); + + } while ( ++i < cmdbuf->nbox ); + if (cmdbuf->nbox == 1) + cmdbuf->nbox = 0; + + out: + cmdbuf->buf += cmdsz * 4; + cmdbuf->bufsz -= cmdsz * 4; + return 0; +} + + +static int radeon_emit_wait( drm_device_t *dev, int flags ) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + RING_LOCALS; + + DRM_DEBUG("%s: %x\n", __FUNCTION__, flags); + switch (flags) { + case RADEON_WAIT_2D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_2D_IDLE(); + ADVANCE_RING(); + break; + case RADEON_WAIT_3D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_3D_IDLE(); + ADVANCE_RING(); + break; + case RADEON_WAIT_2D|RADEON_WAIT_3D: + BEGIN_RING( 2 ); + RADEON_WAIT_UNTIL_IDLE(); + ADVANCE_RING(); + break; + default: + return -EINVAL; + } + + return 0; +} + +int radeon_cp_cmdbuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_device_dma_t *dma = dev->dma; + drm_buf_t *buf = 0; + int idx; + drm_radeon_cmd_buffer_t cmdbuf; + drm_radeon_cmd_header_t header; + int orig_nbox; + + LOCK_TEST_WITH_RETURN( dev ); + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data, + sizeof(cmdbuf) ); + + RING_SPACE_TEST_WITH_RETURN( dev_priv ); + VB_AGE_TEST_WITH_RETURN( dev_priv ); + + + if (verify_area( VERIFY_READ, cmdbuf.buf, cmdbuf.bufsz )) + return -EFAULT; + + if (cmdbuf.nbox && + verify_area( VERIFY_READ, cmdbuf.boxes, + cmdbuf.nbox * sizeof(drm_clip_rect_t))) + return -EFAULT; + + orig_nbox = cmdbuf.nbox; + + while ( cmdbuf.bufsz >= sizeof(header) ) { + + if (__get_user( header.i, (int *)cmdbuf.buf )) { + DRM_ERROR("__get_user %p\n", cmdbuf.buf); + return -EFAULT; + } + + cmdbuf.buf += sizeof(header); + cmdbuf.bufsz -= sizeof(header); + + switch (header.header.cmd_type) { + case RADEON_CMD_PACKET: + DRM_DEBUG("RADEON_CMD_PACKET\n"); + if (radeon_emit_packets( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_packets failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_SCALARS: + DRM_DEBUG("RADEON_CMD_SCALARS\n"); + if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_scalars failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_VECTORS: + DRM_DEBUG("RADEON_CMD_VECTORS\n"); + if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_vectors failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_DMA_DISCARD: + DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); + idx = header.dma.buf_idx; + if ( idx < 0 || idx >= dma->buf_count ) { + DRM_ERROR( "buffer index %d (of %d max)\n", + idx, dma->buf_count - 1 ); + return -EINVAL; + } + + buf = dma->buflist[idx]; + if ( buf->pid != current->pid || buf->pending ) { + DRM_ERROR( "bad buffer\n" ); + return -EINVAL; + } + + radeon_cp_discard_buffer( dev, buf ); + break; + + case RADEON_CMD_PACKET3: + DRM_DEBUG("RADEON_CMD_PACKET3\n"); + if (radeon_emit_packet3( dev, &cmdbuf )) { + DRM_ERROR("radeon_emit_packet3 failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_PACKET3_CLIP: + 
DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); + if (radeon_emit_packet3_cliprect( dev, &cmdbuf, orig_nbox )) { + DRM_ERROR("radeon_emit_packet3_clip failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_SCALARS2: + DRM_DEBUG("RADEON_CMD_SCALARS2\n"); + if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) { + DRM_ERROR("radeon_emit_scalars2 failed\n"); + return -EINVAL; + } + break; + + case RADEON_CMD_WAIT: + DRM_DEBUG("RADEON_CMD_WAIT\n"); + if (radeon_emit_wait( dev, header.wait.flags )) { + DRM_ERROR("radeon_emit_wait failed\n"); + return -EINVAL; + } + break; + default: + DRM_ERROR("bad cmd_type %d at %p\n", + header.header.cmd_type, + cmdbuf.buf - sizeof(header)); + return -EINVAL; + } + } + + + DRM_DEBUG("DONE\n"); + COMMIT_RING(); + return 0; +} + + + +int radeon_cp_getparam(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t param; + int value; + + if ( !dev_priv ) { + DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data, + sizeof(param) ); + + DRM_DEBUG( "pid=%d\n", current->pid ); + + switch( param.param ) { + case RADEON_PARAM_AGP_BUFFER_OFFSET: + value = dev_priv->agp_buffers_offset; + break; + case RADEON_PARAM_LAST_FRAME: + dev_priv->stats.last_frame_reads++; + value = GET_SCRATCH( 0 ); + break; + case RADEON_PARAM_LAST_DISPATCH: + value = GET_SCRATCH( 1 ); + break; + case RADEON_PARAM_LAST_CLEAR: + dev_priv->stats.last_clear_reads++; + value = GET_SCRATCH( 2 ); + break; + case RADEON_PARAM_IRQ_NR: + value = dev->irq; + break; + case RADEON_PARAM_AGP_BASE: + value = dev_priv->agp_vm_start; + break; + default: + return -EINVAL; + } + + if ( copy_to_user( param.value, &value, sizeof(int) ) ) { + DRM_ERROR( "copy_to_user\n" ); + return -EFAULT; + } + return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/sis_drm.h linux.21pre4-ac2/drivers/char/drm/sis_drm.h --- linux.21pre4/drivers/char/drm/sis_drm.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/sis_drm.h 2003-01-06 17:28:08.000000000 +0000 @@ -2,6 +2,16 @@ #ifndef _sis_drm_public_h_ #define _sis_drm_public_h_ +/* SiS specific ioctls */ +#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t) +#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t) +#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t) +#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t) +#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t) +#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t) +#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49) +#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50) + typedef struct { int context; unsigned int offset; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/sis_drv.c linux.21pre4-ac2/drivers/char/drm/sis_drv.c --- linux.21pre4/drivers/char/drm/sis_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/sis_drv.c 2003-01-06 17:28:08.000000000 +0000 @@ -31,31 +31,6 @@ #include "sis_drm.h" #include "sis_drv.h" -#define DRIVER_AUTHOR "SIS" -#define DRIVER_NAME "sis" -#define DRIVER_DESC "SIS 300/630/540" -#define DRIVER_DATE "20010503" -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define DRIVER_IOCTLS \ - [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \ - 
[DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \ - /* AGP Memory Management */ \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \ - [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 } -#if 0 /* these don't appear to be defined */ - /* SIS Stereo */ - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP)] = { sis_flip, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP_INIT)] = { sis_flip_init, 1, 1 }, - [DRM_IOCTL_NR(SIS_IOCTL_FLIP_FINAL)] = { sis_flip_final, 1, 1 } -#endif - -#define __HAVE_COUNTERS 5 - #include "drm_auth.h" #include "drm_agpsupport.h" #include "drm_bufs.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/sis_ds.c linux.21pre4-ac2/drivers/char/drm/sis_ds.c --- linux.21pre4/drivers/char/drm/sis_ds.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/sis_ds.c 2003-01-06 17:28:08.000000000 +0000 @@ -49,16 +49,15 @@ set_t *set; set = (set_t *)MALLOC(sizeof(set_t)); - if (set) { - for(i = 0; i < SET_SIZE; i++){ - set->list[i].free_next = i+1; - set->list[i].alloc_next = -1; - } - set->list[SET_SIZE-1].free_next = -1; - set->free = 0; - set->alloc = -1; - set->trace = -1; - } + for(i = 0; i < SET_SIZE; i++){ + set->list[i].free_next = i+1; + set->list[i].alloc_next = -1; + } + set->list[SET_SIZE-1].free_next = -1; + set->free = 0; + set->alloc = -1; + set->trace = -1; + return set; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/sis.h linux.21pre4-ac2/drivers/char/drm/sis.h --- linux.21pre4/drivers/char/drm/sis.h 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/sis.h 2003-01-06 17:28:08.000000000 +0000 @@ -24,7 +24,7 @@ * DEALINGS IN THE SOFTWARE. 
* */ -/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.1 2001/05/19 18:29:22 dawes Exp $ */ +/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.2 2001/12/19 21:25:59 dawes Exp $ */ #ifndef __SIS_H__ #define __SIS_H__ @@ -42,6 +42,31 @@ #define __HAVE_MTRR 1 #define __HAVE_CTX_BITMAP 1 +#define DRIVER_AUTHOR "SIS" +#define DRIVER_NAME "sis" +#define DRIVER_DESC "SIS 300/630/540" +#define DRIVER_DATE "20010503" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define DRIVER_IOCTLS \ + [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \ + /* AGP Memory Management */ \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \ + [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 } +#if 0 /* these don't appear to be defined */ + /* SIS Stereo */ + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP)] = { sis_flip, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP_INIT)] = { sis_flip_init, 1, 1 }, + [DRM_IOCTL_NR(SIS_IOCTL_FLIP_FINAL)] = { sis_flip_final, 1, 1 } +#endif + +#define __HAVE_COUNTERS 5 + /* Buffer customization: */ #define DRIVER_AGP_BUFFERS_MAP( dev ) \ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm/tdfx_drv.c linux.21pre4-ac2/drivers/char/drm/tdfx_drv.c --- linux.21pre4/drivers/char/drm/tdfx_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm/tdfx_drv.c 2003-01-06 17:28:08.000000000 +0000 @@ -82,25 +82,6 @@ #include "drm_drawable.h" #include "drm_drv.h" -#ifndef MODULE -/* DRM(options) is called by the kernel to parse command-line options - * passed via the boot-loader (e.g., LILO). It calls the insmod option - * routine, drm_parse_drm. - */ - -/* JH- We have to hand expand the string ourselves because of the cpp. If - * anyone can think of a way that we can fit into the __setup macro without - * changing it, then please send the solution my way. 
- */ -static int __init tdfx_options( char *str ) -{ - DRM(parse_options)( str ); - return 1; -} - -__setup( DRIVER_NAME "=", tdfx_options ); -#endif - #include "drm_fops.h" #include "drm_init.h" #include "drm_ioctl.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm-4.0/agpsupport.c linux.21pre4-ac2/drivers/char/drm-4.0/agpsupport.c --- linux.21pre4/drivers/char/drm-4.0/agpsupport.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm-4.0/agpsupport.c 2003-01-06 15:38:22.000000000 +0000 @@ -277,7 +277,6 @@ break; case VIA_APOLLO_KT400: head->chipset = "VIA Apollo KT400"; break; - case VIA_APOLLO_P4X400: head->chipset = "VIA Apollo P4X400"; #endif case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/drm-4.0/tdfx_drv.c linux.21pre4-ac2/drivers/char/drm-4.0/tdfx_drv.c --- linux.21pre4/drivers/char/drm-4.0/tdfx_drv.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/drm-4.0/tdfx_drv.c 2003-01-06 19:14:12.000000000 +0000 @@ -554,7 +554,6 @@ lock.context, current->pid, j, dev->lock.lock_time, jiffies); current->state = TASK_INTERRUPTIBLE; - current->policy |= SCHED_YIELD; schedule_timeout(DRM_LOCK_SLICE-j); DRM_DEBUG("jiffies=%d\n", jiffies); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/genrtc.c linux.21pre4-ac2/drivers/char/genrtc.c --- linux.21pre4/drivers/char/genrtc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/char/genrtc.c 2003-01-28 16:35:25.000000000 +0000 @@ -0,0 +1,533 @@ +/* + * Real Time Clock interface for + * - q40 and other m68k machines, + * - HP PARISC machines + * - PowerPC machines + * emulate some RTC irq capabilities in software + * + * Copyright (C) 1999 Richard Zidlicky + * + * based on Paul Gortmaker's rtc.c device and + * Sam Creasey Generic rtc driver + * + * This driver allows use of the real time clock (built into + * nearly all computers) from user space. It exports the /dev/rtc + * interface supporting various ioctl() and also the /proc/dev/rtc + * pseudo-file for status information. + * + * The ioctls can be used to set the interrupt behaviour where + * supported. + * + * The /dev/rtc interface will block on reads until an interrupt + * has been received. If a RTC interrupt has already happened, + * it will output an unsigned long and then block. The output value + * contains the interrupt status in the low byte and the number of + * interrupts since the last read in the remaining high bytes. The + * /dev/rtc interface can also be used with the select(2) call. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + + * 1.01 fix for 2.3.X rz@linux-m68k.org + * 1.02 merged with code from genrtc.c rz@linux-m68k.org + * 1.03 make it more portable zippel@linux-m68k.org + * 1.04 removed useless timer code rz@linux-m68k.org + * 1.05 portable RTC_UIE emulation rz@linux-m68k.org + * 1.06 set_rtc_time can return an error trini@kernel.crashing.org + * 1.07 ported to HP PARISC (hppa) Helge Deller + */ + +#define RTC_VERSION "1.07" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +/* + * We sponge a minor off of the misc major. 
No need slurping + * up another valuable major dev number for this. If you add + * an ioctl, make sure you don't conflict with SPARC's RTC + * ioctls. + */ + +static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait); + +/* + * Bits in gen_rtc_status. + */ + +#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ + +static unsigned char gen_rtc_status; /* bitmapped status byte. */ +static unsigned long gen_rtc_irq_data; /* our output to the world */ + +/* months start at 0 now */ +static unsigned char days_in_mo[] = +{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +static int irq_active; + +#ifdef CONFIG_GEN_RTC_X +struct tq_struct genrtc_task; +static struct timer_list timer_task; + +static unsigned int oldsecs; +static int lostint; +static int tt_exp; + +static void gen_rtc_timer(unsigned long data); + +static volatile int stask_active; /* schedule_task */ +static volatile int ttask_active; /* timer_task */ +static int stop_rtc_timers; /* don't requeue tasks */ +static spinlock_t gen_rtc_lock = SPIN_LOCK_UNLOCKED; + +static void gen_rtc_interrupt(unsigned long arg); + +/* + * Routine to poll RTC seconds field for change as often as posible, + * after first RTC_UIE use timer to reduce polling + */ +static void genrtc_troutine(void *data) +{ + unsigned int tmp = get_rtc_ss(); + + if (stop_rtc_timers) { + stask_active = 0; + return; + } + + if (oldsecs != tmp){ + oldsecs = tmp; + + timer_task.function = gen_rtc_timer; + timer_task.expires = jiffies + HZ - (HZ/10); + tt_exp=timer_task.expires; + ttask_active=1; + stask_active=0; + add_timer(&timer_task); + + gen_rtc_interrupt(0); + } else if (schedule_task(&genrtc_task) == 0) + stask_active = 0; +} + +static void gen_rtc_timer(unsigned long data) +{ + lostint = get_rtc_ss() - oldsecs ; + if (lostint<0) + lostint = 60 - lostint; + if (time_after(jiffies, tt_exp)) + printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n", + jiffies-tt_exp); + ttask_active=0; + stask_active=1; + if ((schedule_task(&genrtc_task) == 0)) + stask_active = 0; +} + +/* + * call gen_rtc_interrupt function to signal an RTC_UIE, + * arg is unused. + * Could be invoked either from a real interrupt handler or + * from some routine that periodically (eg 100HZ) monitors + * whether RTC_SECS changed + */ +static void gen_rtc_interrupt(unsigned long arg) +{ + /* We store the status in the low byte and the number of + * interrupts received since the last read in the remainder + * of rtc_irq_data. */ + + gen_rtc_irq_data += 0x100; + gen_rtc_irq_data &= ~0xff; + gen_rtc_irq_data |= RTC_UIE; + + if (lostint){ + printk("genrtc: system delaying clock ticks?\n"); + /* increment count so that userspace knows something is wrong */ + gen_rtc_irq_data += ((lostint-1)<<8); + lostint = 0; + } + + wake_up_interruptible(&gen_rtc_wait); +} + +/* + * Now all the various file operations that we export. 
+ */ +static ssize_t gen_rtc_read(struct file *file, char *buf, + size_t count, loff_t *ppos) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long data; + ssize_t retval; + + if (count != sizeof (unsigned int) && count != sizeof (unsigned long)) + return -EINVAL; + + if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data) + return -EAGAIN; + + add_wait_queue(&gen_rtc_wait, &wait); + retval = -ERESTARTSYS; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + data = xchg(&gen_rtc_irq_data, 0); + if (data) + break; + if (signal_pending(current)) + goto out; + schedule(); + } + + /* first test allows optimizer to nuke this case for 32-bit machines */ + if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) { + unsigned int uidata = data; + retval = put_user(uidata, (unsigned long *)buf); + } + else { + retval = put_user(data, (unsigned long *)buf); + } + if (!retval) + retval = sizeof(unsigned long); + out: + current->state = TASK_RUNNING; + remove_wait_queue(&gen_rtc_wait, &wait); + + return retval; +} + +static unsigned int gen_rtc_poll(struct file *file, + struct poll_table_struct *wait) +{ + poll_wait(file, &gen_rtc_wait, wait); + if (gen_rtc_irq_data != 0) + return POLLIN | POLLRDNORM; + return 0; +} + +#endif + +/* + * Used to disable/enable interrupts, only RTC_UIE supported + * We also clear out any old irq data after an ioctl() that + * meddles with the interrupt enable/disable bits. + */ + +static inline void gen_clear_rtc_irq_bit(unsigned char bit) +{ +#ifdef CONFIG_GEN_RTC_X + stop_rtc_timers = 1; + if (ttask_active){ + del_timer_sync(&timer_task); + ttask_active = 0; + } + while (stask_active) + schedule(); + + spin_lock(&gen_rtc_lock); + irq_active = 0; + spin_unlock(&gen_rtc_lock); +#endif +} + +static inline int gen_set_rtc_irq_bit(unsigned char bit) +{ +#ifdef CONFIG_GEN_RTC_X + spin_lock(&gen_rtc_lock); + if ( !irq_active ) { + irq_active = 1; + stop_rtc_timers = 0; + lostint = 0; + genrtc_task.routine = genrtc_troutine; + oldsecs = get_rtc_ss(); + init_timer(&timer_task); + + stask_active = 1; + if (schedule_task(&genrtc_task) == 0){ + stask_active = 0; + } + } + spin_unlock(&gen_rtc_lock); + gen_rtc_irq_data = 0; + return 0; +#else + return -EINVAL; +#endif +} + +static int gen_rtc_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct rtc_time wtime; + struct rtc_pll_info pll; + + switch (cmd) { + + case RTC_PLL_GET: + if (get_rtc_pll(&pll)) + return -EINVAL; + else + return copy_to_user((void *)arg, &pll, sizeof pll) ? -EFAULT : 0; + + case RTC_PLL_SET: + if (!capable(CAP_SYS_TIME)) + return -EACCES; + if (copy_from_user(&pll, (struct rtc_pll_info*)arg, + sizeof(pll))) + return -EFAULT; + return set_rtc_pll(&pll); + + case RTC_UIE_OFF: /* disable ints from RTC updates. */ + gen_clear_rtc_irq_bit(RTC_UIE); + return 0; + + case RTC_UIE_ON: /* enable ints for RTC updates. */ + return gen_set_rtc_irq_bit(RTC_UIE); + + case RTC_RD_TIME: /* Read the time/date from RTC */ + /* this doesn't get week-day, who cares */ + memset(&wtime, 0, sizeof(wtime)); + get_rtc_time(&wtime); + + return copy_to_user((void *)arg, &wtime, sizeof(wtime)) ? 
-EFAULT : 0; + + case RTC_SET_TIME: /* Set the RTC */ + { + int year; + unsigned char leap_yr; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&wtime, (struct rtc_time *)arg, + sizeof(wtime))) + return -EFAULT; + + year = wtime.tm_year + 1900; + leap_yr = ((!(year % 4) && (year % 100)) || + !(year % 400)); + + if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) + return -EINVAL; + + if (wtime.tm_mday < 0 || wtime.tm_mday > + (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) + return -EINVAL; + + if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || + wtime.tm_min < 0 || wtime.tm_min >= 60 || + wtime.tm_sec < 0 || wtime.tm_sec >= 60) + return -EINVAL; + + return set_rtc_time(&wtime); + } + } + + return -EINVAL; +} + +/* + * We enforce only one user at a time here with the open/close. + * Also clear the previous interrupt data on an open, and clean + * up things on a close. + */ + +static int gen_rtc_open(struct inode *inode, struct file *file) +{ + if (gen_rtc_status & RTC_IS_OPEN) + return -EBUSY; + + MOD_INC_USE_COUNT; + + gen_rtc_status |= RTC_IS_OPEN; + gen_rtc_irq_data = 0; + irq_active = 0; + + return 0; +} + +static int gen_rtc_release(struct inode *inode, struct file *file) +{ + /* + * Turn off all interrupts once the device is no longer + * in use and clear the data. + */ + + gen_clear_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE); + + gen_rtc_status &= ~RTC_IS_OPEN; + MOD_DEC_USE_COUNT; + + return 0; +} + +static int gen_rtc_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data); + + +/* + * The various file operations we support. + */ + +static struct file_operations gen_rtc_fops = { + .owner = THIS_MODULE, +#ifdef CONFIG_GEN_RTC_X + .read = gen_rtc_read, + .poll = gen_rtc_poll, +#endif + .ioctl = gen_rtc_ioctl, + .open = gen_rtc_open, + .release = gen_rtc_release, +}; + +static struct miscdevice rtc_gen_dev = +{ + .minor = RTC_MINOR, + .name = "rtc", + .fops = &gen_rtc_fops, +}; + +static int __init rtc_generic_init(void) +{ + int retval; + + printk(KERN_INFO "Generic RTC Driver v%s\n", RTC_VERSION); + + retval = misc_register(&rtc_gen_dev); + if(retval < 0) + return retval; + +#ifdef CONFIG_PROC_FS + if((create_proc_read_entry ("driver/rtc", 0, 0, gen_rtc_read_proc, NULL)) == NULL){ + misc_deregister(&rtc_gen_dev); + return -ENOMEM; + } +#endif + + return 0; +} + +static void __exit rtc_generic_exit(void) +{ + remove_proc_entry ("driver/rtc", NULL); + misc_deregister(&rtc_gen_dev); +} + +module_init(rtc_generic_init); +module_exit(rtc_generic_exit); +EXPORT_NO_SYMBOLS; + + +/* + * Info exported via "/proc/rtc". 
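As a rough user-space illustration of the read() contract spelled out in this new driver's header comment (block until an update interrupt, then return a value whose low byte is the interrupt status and whose remaining bytes count interrupts since the last read), a minimal consumer might look like the sketch below. It is not part of the patch; the /dev/rtc path and the fixed five-read loop are assumptions made only for the example.

/* Illustrative sketch only: exercises the RTC_UIE emulation added above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int i, fd = open("/dev/rtc", O_RDONLY);	/* device path assumed */

	if (fd < 0 || ioctl(fd, RTC_UIE_ON, 0) < 0)
		return 1;
	for (i = 0; i < 5; i++) {
		if (read(fd, &data, sizeof(data)) != sizeof(data))
			break;
		/* low byte: interrupt status; high bytes: count since last read */
		printf("status 0x%02lx, %lu interrupt(s)\n",
		       data & 0xff, data >> 8);
	}
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}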
+ */ + +#ifdef CONFIG_PROC_FS + +static int gen_rtc_proc_output(char *buf) +{ + char *p; + struct rtc_time tm; + unsigned int flags; + struct rtc_pll_info pll; + + p = buf; + + flags = get_rtc_time(&tm); + + p += sprintf(p, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n" + "rtc_epoch\t: %04u\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 1900); + + tm.tm_hour = tm.tm_min = tm.tm_sec = 0; + + p += sprintf(p, "alarm\t\t: "); + if (tm.tm_hour <= 24) + p += sprintf(p, "%02d:", tm.tm_hour); + else + p += sprintf(p, "**:"); + + if (tm.tm_min <= 59) + p += sprintf(p, "%02d:", tm.tm_min); + else + p += sprintf(p, "**:"); + + if (tm.tm_sec <= 59) + p += sprintf(p, "%02d\n", tm.tm_sec); + else + p += sprintf(p, "**\n"); + + p += sprintf(p, + "DST_enable\t: %s\n" + "BCD\t\t: %s\n" + "24hr\t\t: %s\n" + "square_wave\t: %s\n" + "alarm_IRQ\t: %s\n" + "update_IRQ\t: %s\n" + "periodic_IRQ\t: %s\n" + "periodic_freq\t: %ld\n" + "batt_status\t: %s\n", + (flags & RTC_DST_EN) ? "yes" : "no", + (flags & RTC_DM_BINARY) ? "no" : "yes", + (flags & RTC_24H) ? "yes" : "no", + (flags & RTC_SQWE) ? "yes" : "no", + (flags & RTC_AIE) ? "yes" : "no", + irq_active ? "yes" : "no", + (flags & RTC_PIE) ? "yes" : "no", + 0L /* freq */, + (flags & RTC_BATT_BAD) ? "bad" : "okay"); + if (!get_rtc_pll(&pll)) + p += sprintf(p, + "PLL adjustment\t: %d\n" + "PLL max +ve adjustment\t: %d\n" + "PLL max -ve adjustment\t: %d\n" + "PLL +ve adjustment factor\t: %d\n" + "PLL -ve adjustment factor\t: %d\n" + "PLL frequency\t: %ld\n", + pll.pll_value, + pll.pll_max, + pll.pll_min, + pll.pll_posmult, + pll.pll_negmult, + pll.pll_clock); + return p - buf; +} + +static int gen_rtc_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = gen_rtc_proc_output (page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + return len; +} + +#endif /* CONFIG_PROC_FS */ + + +MODULE_AUTHOR("Richard Zidlicky"); +MODULE_LICENSE("GPL"); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/ipmi/ipmi_kcs_intf.c linux.21pre4-ac2/drivers/char/ipmi/ipmi_kcs_intf.c --- linux.21pre4/drivers/char/ipmi/ipmi_kcs_intf.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/ipmi/ipmi_kcs_intf.c 2003-01-09 00:30:24.000000000 +0000 @@ -252,6 +252,7 @@ if (kcs_info->msg_flags & WDT_PRE_TIMEOUT_INT) { /* Watchdog pre-timeout */ start_clear_flags(kcs_info); + kcs_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; spin_unlock(&(kcs_info->kcs_lock)); ipmi_smi_watchdog_pretimeout(kcs_info->intf); spin_lock(&(kcs_info->kcs_lock)); @@ -323,13 +324,18 @@ case KCS_GETTING_FLAGS: { unsigned char msg[4]; + unsigned int len; /* We got the flags from the KCS, now handle them. */ - kcs_get_result(kcs_info->kcs_sm, msg, 4); + len = kcs_get_result(kcs_info->kcs_sm, msg, 4); if (msg[2] != 0) { /* Error fetching flags, just give up for now. */ kcs_info->kcs_state = KCS_NORMAL; + } else if (len < 3) { + /* Hmm, no flags. That's technically illegal, but + don't use uninitialized data. 
*/ + kcs_info->kcs_state = KCS_NORMAL; } else { kcs_info->msg_flags = msg[3]; handle_flags(kcs_info); @@ -1036,7 +1042,9 @@ int rv = 0; int pos = 0; int i = 0; +#ifdef CONFIG_ACPI unsigned long physaddr = 0; +#endif if (initialized) return 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/ipmi/ipmi_kcs_sm.c linux.21pre4-ac2/drivers/char/ipmi/ipmi_kcs_sm.c --- linux.21pre4/drivers/char/ipmi/ipmi_kcs_sm.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/ipmi/ipmi_kcs_sm.c 2003-01-09 00:30:24.000000000 +0000 @@ -93,6 +93,7 @@ #define MAX_ERROR_RETRIES 10 #define IPMI_ERR_MSG_TRUNCATED 0xc6 +#define IPMI_ERR_UNSPECIFIED 0xff struct kcs_data { @@ -291,6 +292,12 @@ memcpy(data, kcs->read_data, kcs->read_pos); + if ((length >= 3) && (kcs->read_pos < 3)) { + /* Guarantee that we return at least 3 bytes, with an + error in the third byte if it is too short. */ + data[2] = IPMI_ERR_UNSPECIFIED; + kcs->read_pos = 3; + } if (kcs->truncated) { /* Report a truncated error. We might overwrite another error, but that's too bad, the user needs diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/ipmi/ipmi_watchdog.c linux.21pre4-ac2/drivers/char/ipmi/ipmi_watchdog.c --- linux.21pre4/drivers/char/ipmi/ipmi_watchdog.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/ipmi/ipmi_watchdog.c 2003-01-09 00:30:24.000000000 +0000 @@ -44,6 +44,8 @@ #include #include #include +#include +#include #ifdef CONFIG_X86_LOCAL_APIC #include #endif @@ -73,6 +75,14 @@ #define WDOG_PRETIMEOUT_NMI 2 #define WDOG_PRETIMEOUT_MSG_INT 3 +/* Operations that can be performed on a pretimout. */ +#define WDOG_PREOP_NONE 0 +#define WDOG_PREOP_PANIC 1 +#define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to + read. Doesn't work in NMI + mode. */ + +/* Actions to perform on a full timeout. */ #define WDOG_SET_TIMEOUT_ACT(byte, use) \ byte = ((byte) & 0xf8) | ((use) & 0x7) #define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7) @@ -133,10 +143,20 @@ static char *preaction = "pre_none"; +static unsigned char preop_val = WDOG_PREOP_NONE; + +static char *preop = "preop_none"; +static spinlock_t ipmi_read_lock = SPIN_LOCK_UNLOCKED; +static char data_to_read = 0; +static DECLARE_WAIT_QUEUE_HEAD(read_q); +static struct fasync_struct *fasync_q = NULL; +static char pretimeout_since_last_heartbeat = 0; + MODULE_PARM(timeout, "i"); MODULE_PARM(pretimeout, "i"); MODULE_PARM(action, "s"); MODULE_PARM(preaction, "s"); +MODULE_PARM(preop, "s"); /* Default state of the timer. */ static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE; @@ -373,6 +393,15 @@ return ipmi_set_timeout(); } + if (pretimeout_since_last_heartbeat) { + /* A pretimeout occurred, make sure we set the timeout. + We don't want to set the action, though, we want to + leave that alone (thus it can't be combined with the + above operation. */ + pretimeout_since_last_heartbeat = 0; + return ipmi_set_timeout(); + } + down(&heartbeat_lock); atomic_set(&heartbeat_tofree, 2); @@ -552,12 +581,53 @@ size_t count, loff_t *ppos) { + int rv = 0; + wait_queue_t wait; + /* Can't seek (pread) on this device */ if (ppos != &file->f_pos) return -ESPIPE; - /* Also can't read it. */ - return -EINVAL; + if (count <= 0) + return 0; + + /* Reading returns if the pretimeout has gone off, and it only does + it once per pretimeout. 
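As a hedged illustration of how this one-shot pretimeout read and the poll() support added further down might be consumed, a user-space monitor could look roughly like the sketch below. The /dev/watchdog node, loading the module with preop=preop_give_data, and a non-NMI pretimeout action are all assumptions; note also that opening the node arms the watchdog in this driver, so a real monitor would keep sending heartbeats as well.

/* Illustrative sketch only: waits for the readable pretimeout notification. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

int main(void)
{
	char byte;
	fd_set rfds;
	int fd = open("/dev/watchdog", O_RDONLY);	/* device path assumed */

	if (fd < 0)
		return 1;
	for (;;) {
		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);
		if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0)
			break;
		if (read(fd, &byte, 1) == 1)	/* one byte per pretimeout */
			fprintf(stderr, "watchdog pretimeout fired\n");
	}
	close(fd);
	return 0;
}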
*/ + spin_lock(&ipmi_read_lock); + if (!data_to_read) { + if (file->f_flags & O_NONBLOCK) { + rv = -EAGAIN; + goto out; + } + + init_waitqueue_entry(&wait, current); + add_wait_queue(&read_q, &wait); + while (!data_to_read) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock(&ipmi_read_lock); + schedule(); + spin_lock(&ipmi_read_lock); + } + remove_wait_queue(&read_q, &wait); + + if (signal_pending(current)) { + rv = -ERESTARTSYS; + goto out; + } + } + data_to_read = 0; + + out: + spin_unlock(&ipmi_read_lock); + + if (rv == 0) { + if (copy_to_user(buf, &data_to_read, 1)) + rv = -EFAULT; + else + rv = 1; + } + + return rv; } static int ipmi_open(struct inode *ino, struct file *filep) @@ -580,6 +650,29 @@ } } +static unsigned int ipmi_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + + poll_wait(file, &read_q, wait); + + spin_lock(&ipmi_read_lock); + if (data_to_read) + mask |= (POLLIN | POLLRDNORM); + spin_unlock(&ipmi_read_lock); + + return mask; +} + +static int ipmi_fasync(int fd, struct file *file, int on) +{ + int result; + + result = fasync_helper(fd, file, on, &fasync_q); + + return (result); +} + static int ipmi_close(struct inode *ino, struct file *filep) { if (minor(ino->i_rdev)==WATCHDOG_MINOR) @@ -590,16 +683,21 @@ #endif ipmi_wdog_open = 0; } + + ipmi_fasync (-1, filep, 0); + return 0; } static struct file_operations ipmi_wdog_fops = { .owner = THIS_MODULE, .read = ipmi_read, + .poll = ipmi_poll, .write = ipmi_write, .ioctl = ipmi_ioctl, .open = ipmi_open, .release = ipmi_close, + .fasync = ipmi_fasync, }; static struct miscdevice ipmi_wdog_miscdev = { @@ -611,7 +709,7 @@ static DECLARE_RWSEM(register_sem); static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, - void *handler_data) + void *handler_data) { if (msg->msg.data[0] != 0) { printk(KERN_ERR "IPMI Watchdog response: Error %x on cmd %x\n", @@ -624,7 +722,23 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) { - panic("Watchdog pre-timeout"); + if (preaction_val != WDOG_PRETIMEOUT_NONE) { + if (preop_val == WDOG_PREOP_PANIC) + panic("Watchdog pre-timeout"); + else if (preop_val == WDOG_PREOP_GIVE_DATA) { + spin_lock(&ipmi_read_lock); + data_to_read = 1; + wake_up_interruptible(&read_q); + kill_fasync(&fasync_q, SIGIO, POLL_IN); + + /* On some machines, the heartbeat will give + an error and not work unless we re-enable + the timer. So do so. */ + pretimeout_since_last_heartbeat = 1; + + spin_unlock(&ipmi_read_lock); + } + } } static struct ipmi_user_hndl ipmi_hndlrs = @@ -676,7 +790,7 @@ { /* If no one else handled the NMI, we assume it was the IPMI watchdog. 
*/ - if (!handled) + if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) panic("IPMI watchdog pre-timeout"); return NOTIFY_DONE; } @@ -807,8 +921,27 @@ " none\n", preaction); } + if (strcmp(preop, "preop_none") == 0) { + preop_val = WDOG_PREOP_NONE; + } else if (strcmp(preop, "preop_panic") == 0) { + preop_val = WDOG_PREOP_PANIC; + } else if (strcmp(preop, "preop_give_data") == 0) { + preop_val = WDOG_PREOP_GIVE_DATA; + } else { + preop_val = WDOG_PREOP_NONE; + printk("ipmi_watchdog: Unknown preop '%s', defaulting to" + " none\n", preop); + } + #ifdef HAVE_NMI_HANDLER if (preaction_val == WDOG_PRETIMEOUT_NMI) { + if (preop_val == WDOG_PREOP_GIVE_DATA) { + printk(KERN_WARNING + "ipmi_watchdog: Pretimeout op is to give data" + " but NMI pretimeout is enabled, setting" + " pretimeout op to none\n"); + preop_val = WDOG_PREOP_NONE; + } #ifdef CONFIG_X86_LOCAL_APIC if (nmi_watchdog == NMI_IO_APIC) { printk(KERN_WARNING @@ -864,15 +997,15 @@ release_nmi(&ipmi_nmi_handler); #endif + notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier); + unregister_reboot_notifier(&wdog_reboot_notifier); + if (! watchdog_user) goto out; /* Make sure no one can call us any more. */ misc_deregister(&ipmi_wdog_miscdev); - notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier); - unregister_reboot_notifier(&wdog_reboot_notifier); - /* Disable the timer. */ ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(); @@ -955,6 +1088,15 @@ } else if (strcmp(option, "start_now") == 0) { start_now = 1; + } + else if (strcmp(option, "preop_none") == 0) { + preop = "preop_none"; + } + else if (strcmp(option, "preop_panic") == 0) { + preop = "preop_panic"; + } + else if (strcmp(option, "preop_give_data") == 0) { + preop = "preop_give_data"; } else { printk("Unknown IPMI watchdog option: '%s'\n", option); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/keyboard.c linux.21pre4-ac2/drivers/char/keyboard.c --- linux.21pre4/drivers/char/keyboard.c 2003-01-29 16:26:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/keyboard.c 2003-01-10 16:23:45.000000000 +0000 @@ -64,6 +64,7 @@ void (*kbd_ledfunc)(unsigned int led); EXPORT_SYMBOL(handle_scancode); EXPORT_SYMBOL(kbd_ledfunc); +EXPORT_SYMBOL(kbd_refresh_leds); extern void ctrl_alt_del(void); @@ -94,6 +95,7 @@ static struct tty_struct **ttytab; static struct kbd_struct * kbd = kbd_table; static struct tty_struct * tty; +static unsigned char prev_scancode; void compute_shiftstate(void); @@ -213,7 +215,17 @@ } kbd = kbd_table + fg_console; if ((raw_mode = (kbd->kbdmode == VC_RAW))) { - put_queue(scancode | up_flag); + /* + * The following is a workaround for hardware + * which sometimes sends the key release event twice + */ + unsigned char next_scancode = scancode|up_flag; + if (up_flag && next_scancode==prev_scancode) { + /* unexpected 2nd release event */ + } else { + prev_scancode=next_scancode; + put_queue(next_scancode); + } /* we do not return yet, because we want to maintain the key_down array, so that we have the correct values when finishing RAW mode or when changing VT's */ @@ -899,9 +911,9 @@ * Aside from timing (which isn't really that important for * keyboard interrupts as they happen often), using the software * interrupt routines for this thing allows us to easily mask - * this when we don't want any of the above to happen. Not yet - * used, but this allows for easy and efficient race-condition - * prevention later on. + * this when we don't want any of the above to happen. 
+ * This allows for easy and efficient race-condition prevention + * for kbd_ledfunc => input_event(dev, EV_LED, ...) => ... */ static void kbd_bh(unsigned long dummy) { @@ -917,6 +929,18 @@ EXPORT_SYMBOL(keyboard_tasklet); DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0); +/* + * This allows a newly plugged keyboard to pick the LED state. + * We do it in this seemindly backwards fashion to ensure proper locking. + * Built-in keyboard does refresh on its own. + */ +void kbd_refresh_leds(void) +{ + tasklet_disable(&keyboard_tasklet); + if (ledstate != 0xff && kbd_ledfunc != NULL) kbd_ledfunc(ledstate); + tasklet_enable(&keyboard_tasklet); +} + typedef void (pm_kbd_func) (void); pm_callback pm_kbd_request_override = NULL; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/Makefile linux.21pre4-ac2/drivers/char/Makefile --- linux.21pre4/drivers/char/Makefile 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/Makefile 2003-01-28 16:35:25.000000000 +0000 @@ -24,7 +24,7 @@ export-objs := busmouse.o console.o keyboard.o sysrq.o \ misc.o pty.o random.o selection.o serial.o \ sonypi.o tty_io.o tty_ioctl.o generic_serial.o \ - au1000_gpio.o hp_psaux.o nvram.o + au1000_gpio.o hp_psaux.o nvram.o scx200.o mod-subdirs := joystick ftape drm drm-4.0 pcmcia @@ -199,6 +199,12 @@ obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o obj-$(CONFIG_N_HDLC) += n_hdlc.o obj-$(CONFIG_SPECIALIX) += specialix.o + +subdir-$(CONFIG_ATI_CD1865) += cd1865 +ifeq ($(CONFIG_ATI_CD1865),y) + obj-y += cd1865/SILX.o +endif + obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o obj-$(CONFIG_SX) += sx.o generic_serial.o @@ -237,6 +243,7 @@ obj-$(CONFIG_PC110_PAD) += pc110pad.o obj-$(CONFIG_MK712_MOUSE) += mk712.o obj-$(CONFIG_RTC) += rtc.o +obj-$(CONFIG_GEN_RTC) += genrtc.o obj-$(CONFIG_EFI_RTC) += efirtc.o ifeq ($(CONFIG_PPC),) obj-$(CONFIG_NVRAM) += nvram.o @@ -293,7 +300,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o -#obj-$(CONFIG_ALIM1535_WDT) += alim1535d_wdt.o +obj-$(CONFIG_ALIM1535_WDT) += alim1535d_wdt.o obj-$(CONFIG_INDYDOG) += indydog.o obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/mem.c linux.21pre4-ac2/drivers/char/mem.c --- linux.21pre4/drivers/char/mem.c 2003-01-29 16:26:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/mem.c 2003-01-28 16:18:51.000000000 +0000 @@ -520,7 +520,91 @@ return capable(CAP_SYS_RAWIO) ? 
0 : -EPERM; } -#define mmap_kmem mmap_mem +struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write) +{ + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long kaddr; + pgd_t *pgd; + pmd_t *pmd; + pte_t *ptep, pte; + struct page *page = NULL; + + /* address is user VA; convert to kernel VA of desired page */ + kaddr = (address - vma->vm_start) + offset; + kaddr = VMALLOC_VMADDR(kaddr); + + spin_lock(&init_mm.page_table_lock); + + /* Lookup page structure for kernel VA */ + pgd = pgd_offset(&init_mm, kaddr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + goto out; + pmd = pmd_offset(pgd, kaddr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + goto out; + ptep = pte_offset(pmd, kaddr); + if (!ptep) + goto out; + pte = *ptep; + if (!pte_present(pte)) + goto out; + if (write && !pte_write(pte)) + goto out; + page = pte_page(pte); + if (!VALID_PAGE(page)) { + page = NULL; + goto out; + } + + /* Increment reference count on page */ + get_page(page); + +out: + spin_unlock(&init_mm.page_table_lock); + + return page; +} + +struct vm_operations_struct kmem_vm_ops = { + nopage: kmem_vm_nopage, +}; + +static int mmap_kmem(struct file * file, struct vm_area_struct * vma) +{ + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long size = vma->vm_end - vma->vm_start; + + /* + * If the user is not attempting to mmap a high memory address then + * the standard mmap_mem mechanism will work. High memory addresses + * need special handling, as remap_page_range expects a physically- + * contiguous range of kernel addresses (such as obtained in kmalloc). + */ + if ((offset + size) < (unsigned long) high_memory) + return mmap_mem(file, vma); + + /* + * Accessing memory above the top the kernel knows about or + * through a file pointer that was marked O_SYNC will be + * done non-cached. + */ + if (noncached_address(offset) || (file->f_flags & O_SYNC)) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* Don't do anything here; "nopage" will fill the holes */ + vma->vm_ops = &kmem_vm_ops; + + /* Don't try to swap out physical pages.. */ + vma->vm_flags |= VM_RESERVED; + + /* + * Don't dump addresses that are not real memory to a core file. 
+ */ + vma->vm_flags |= VM_IO; + + return 0; +} + #define zero_lseek null_lseek #define full_lseek null_lseek #define write_zero write_null diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/mwave/mwavedd.c linux.21pre4-ac2/drivers/char/mwave/mwavedd.c --- linux.21pre4/drivers/char/mwave/mwavedd.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/mwave/mwavedd.c 2003-01-06 19:14:12.000000000 +0000 @@ -279,7 +279,6 @@ pDrvData->IPCs[ipcnum].bIsHere = FALSE; pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) - current->nice = -20; /* boost to provide priority timing */ #else current->priority = 0x28; /* boost to provide priority timing */ #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/n_hdlc.c linux.21pre4-ac2/drivers/char/n_hdlc.c --- linux.21pre4/drivers/char/n_hdlc.c 2003-01-29 16:26:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/n_hdlc.c 2003-01-06 15:38:20.000000000 +0000 @@ -9,7 +9,7 @@ * Al Longyear , Paul Mackerras * * Original release 01/11/99 - * $Id: n_hdlc.c,v 3.3 2001/11/08 16:16:03 paulkf Exp $ + * $Id: n_hdlc.c,v 3.6 2002/12/19 18:58:56 paulkf Exp $ * * This code is released under the GNU General Public License (GPL) * @@ -78,7 +78,7 @@ */ #define HDLC_MAGIC 0x239e -#define HDLC_VERSION "$Revision: 3.3 $" +#define HDLC_VERSION "$Revision: 3.6 $" #include #include @@ -172,9 +172,9 @@ /* * HDLC buffer list manipulation functions */ -void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list); -void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf); -N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list); +static void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list); +static void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf); +static N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list); /* Local functions */ @@ -186,10 +186,10 @@ /* debug level can be set by insmod for debugging purposes */ #define DEBUG_LEVEL_INFO 1 -int debuglevel=0; +static int debuglevel=0; /* max frame size for memory allocations */ -ssize_t maxframe=4096; +static ssize_t maxframe=4096; /* TTY callbacks */ @@ -265,7 +265,8 @@ } else break; } - + if (n_hdlc->tbuf) + kfree(n_hdlc->tbuf); kfree(n_hdlc); } /* end of n_hdlc_release() */ @@ -905,7 +906,7 @@ * Arguments: list pointer to buffer list * Return Value: None */ -void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list) +static void n_hdlc_buf_list_init(N_HDLC_BUF_LIST *list) { memset(list,0,sizeof(N_HDLC_BUF_LIST)); spin_lock_init(&list->spinlock); @@ -922,7 +923,7 @@ * * Return Value: None */ -void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf) +static void n_hdlc_buf_put(N_HDLC_BUF_LIST *list,N_HDLC_BUF *buf) { unsigned long flags; spin_lock_irqsave(&list->spinlock,flags); @@ -952,7 +953,7 @@ * * pointer to HDLC buffer if available, otherwise NULL */ -N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list) +static N_HDLC_BUF* n_hdlc_buf_get(N_HDLC_BUF_LIST *list) { unsigned long flags; N_HDLC_BUF *buf; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/nwflash.c linux.21pre4-ac2/drivers/char/nwflash.c --- linux.21pre4/drivers/char/nwflash.c 2003-01-29 16:26:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/nwflash.c 2003-01-06 17:28:32.000000000 +0000 @@ -154,12 +154,11 @@ if (down_interruptible(&nwflash_sem)) return -ERESTARTSYS; - ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count); - if (ret == 0) { - ret = count; - *ppos += count; - } + ret = count - copy_to_user(buf, (void 
*)(FLASH_BASE + p), count); + *ppos += ret; up(&nwflash_sem); + if (ret == 0) + ret = -EFAULT; } return ret; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/pc_keyb.c linux.21pre4-ac2/drivers/char/pc_keyb.c --- linux.21pre4/drivers/char/pc_keyb.c 2003-01-29 16:26:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/pc_keyb.c 2003-01-06 15:42:16.000000000 +0000 @@ -1225,41 +1225,13 @@ #endif /* CONFIG_PSMOUSE */ -static int blink_frequency = HZ/2; +void pckbd_blink (char led) { + led = led ? (0x01 | 0x04) : 0x00; -/* Tell the user who may be running in X and not see the console that we have - panic'ed. This is to distingush panics from "real" lockups. - Could in theory send the panic message as morse, but that is left as an - exercise for the reader. */ -void panic_blink(void) -{ - static unsigned long last_jiffie; - static char led; - /* Roughly 1/2s frequency. KDB uses about 1s. Make sure it is - different. */ - if (!blink_frequency) - return; - if (jiffies - last_jiffie > blink_frequency) { - led ^= 0x01 | 0x04; while (kbd_read_status() & KBD_STAT_IBF) mdelay(1); kbd_write_output(KBD_CMD_SET_LEDS); mdelay(1); while (kbd_read_status() & KBD_STAT_IBF) mdelay(1); mdelay(1); kbd_write_output(led); - last_jiffie = jiffies; - } -} - -static int __init panicblink_setup(char *str) -{ - int par; - if (get_option(&str,&par)) - blink_frequency = par*(1000/HZ); - return 1; } - -/* panicblink=0 disables the blinking as it caused problems with some console - switches. otherwise argument is ms of a blink period. */ -__setup("panicblink=", panicblink_setup); - diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/raw.c linux.21pre4-ac2/drivers/char/raw.c --- linux.21pre4/drivers/char/raw.c 2003-01-29 16:26:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/raw.c 2003-01-28 15:47:22.000000000 +0000 @@ -170,7 +170,7 @@ { int minor = minor(inode->i_rdev), err; struct block_device *b; - if (minor < 1 && minor > 255) + if (minor < 1 || minor > 255) return -ENODEV; b = raw_devices[minor].binding; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/serial.c linux.21pre4-ac2/drivers/char/serial.c --- linux.21pre4/drivers/char/serial.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/serial.c 2003-01-06 23:14:19.000000000 +0000 @@ -62,6 +62,10 @@ * Robert Schwebel , * Juergen Beisert , * Theodore Ts'o + * 4/02: added TTY_DO_WRITE_WAKEUP to enable n_tty to send POLL_OUTS + * to waiting processes + * Sapan Bhatia + * */ static char *serial_version = "5.05c"; @@ -203,6 +207,7 @@ #include #include #include +#include #if (LINUX_VERSION_CODE >= 131343) #include #endif @@ -1218,7 +1223,7 @@ if (!page) return -ENOMEM; - save_flags(flags); cli(); + spin_lock_irqsave( &info->irq_spinlock, flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); @@ -1456,11 +1461,11 @@ change_speed(info, 0); info->flags |= ASYNC_INITIALIZED; - restore_flags(flags); + spin_unlock_irqrestore( &info->irq_spinlock, flags); return 0; errout: - restore_flags(flags); + spin_unlock_irqrestore( &info->irq_spinlock, flags); return retval; } @@ -1484,7 +1489,7 @@ state->irq); #endif - save_flags(flags); cli(); /* Disable interrupts */ + spin_lock_irqsave( &info->irq_spinlock, flags); /* * clear delta_msr_wait queue to avoid mem leaks: we may free the irq @@ -1492,41 +1497,6 @@ */ wake_up_interruptible(&info->delta_msr_wait); - /* - * First unlink the serial port from the IRQ chain... 
- */ - if (info->next_port) - info->next_port->prev_port = info->prev_port; - if (info->prev_port) - info->prev_port->next_port = info->next_port; - else - IRQ_ports[state->irq] = info->next_port; - figure_IRQ_timeout(state->irq); - - /* - * Free the IRQ, if necessary - */ - if (state->irq && (!IRQ_ports[state->irq] || - !IRQ_ports[state->irq]->next_port)) { - if (IRQ_ports[state->irq]) { - free_irq(state->irq, &IRQ_ports[state->irq]); - retval = request_irq(state->irq, rs_interrupt_single, - SA_SHIRQ, "serial", - &IRQ_ports[state->irq]); - - if (retval) - printk("serial shutdown: request_irq: error %d" - " Couldn't reacquire IRQ.\n", retval); - } else - free_irq(state->irq, &IRQ_ports[state->irq]); - } - - if (info->xmit.buf) { - unsigned long pg = (unsigned long) info->xmit.buf; - info->xmit.buf = 0; - free_page(pg); - } - info->IER = 0; serial_outp(info, UART_IER, 0x00); /* disable all intrs */ #ifdef CONFIG_SERIAL_MANY_PORTS @@ -1583,7 +1553,43 @@ serial_outp(info, UART_IER, UART_IERX_SLEEP); } info->flags &= ~ASYNC_INITIALIZED; - restore_flags(flags); + + /* + * First unlink the serial port from the IRQ chain... + */ + if (info->next_port) + info->next_port->prev_port = info->prev_port; + if (info->prev_port) + info->prev_port->next_port = info->next_port; + else + IRQ_ports[state->irq] = info->next_port; + figure_IRQ_timeout(state->irq); + + /* + * Free the IRQ, if necessary + */ + if (state->irq && (!IRQ_ports[state->irq] || + !IRQ_ports[state->irq]->next_port)) { + if (IRQ_ports[state->irq]) { + free_irq(state->irq, &IRQ_ports[state->irq]); + retval = request_irq(state->irq, rs_interrupt_single, + SA_SHIRQ, "serial", + &IRQ_ports[state->irq]); + + if (retval) + printk("serial shutdown: request_irq: error %d" + " Couldn't reacquire IRQ.\n", retval); + } else + free_irq(state->irq, &IRQ_ports[state->irq]); + } + + if (info->xmit.buf) { + unsigned long pg = (unsigned long) info->xmit.buf; + info->xmit.buf = 0; + free_page(pg); + } + + spin_unlock_irqrestore( &info->irq_spinlock, flags); } #if (LINUX_VERSION_CODE < 131394) /* Linux 2.1.66 */ @@ -3128,6 +3134,7 @@ info->tqueue.routine = do_softint; info->tqueue.data = info; info->state = sstate; + spin_lock_init(&info->irq_spinlock); if (sstate->info) { kfree(info); *ret_info = sstate->info; @@ -3242,6 +3249,7 @@ #ifdef SERIAL_DEBUG_OPEN printk("rs_open ttys%d successful...", info->line); #endif + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); return 0; } @@ -3655,6 +3663,7 @@ info->io_type = state->io_type; info->iomem_base = state->iomem_base; info->iomem_reg_shift = state->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; save_flags(flags); cli(); @@ -3907,7 +3916,14 @@ case 6: /* BAR 4*/ case 7: base_idx=idx-2; /* BAR 5*/ } - + + /* AFAVLAB uses a different mixture of BARs and offsets */ + /* Not that ugly ;) -- HW */ + if (dev->vendor == PCI_VENDOR_ID_AFAVLAB && idx >= 4) { + base_idx = 4; + offset = (idx - 4) * 8; + } + /* Some Titan cards are also a little weird */ if (dev->vendor == PCI_VENDOR_ID_TITAN && (dev->device == PCI_DEVICE_ID_TITAN_400L || @@ -4311,9 +4327,11 @@ pbn_b0_bt_1_115200, pbn_b0_bt_2_115200, + pbn_b0_bt_8_115200, pbn_b0_bt_1_460800, pbn_b0_bt_2_460800, pbn_b0_bt_2_921600, + pbn_b0_bt_4_460800, pbn_b1_1_115200, pbn_b1_2_115200, @@ -4392,9 +4410,11 @@ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 }, /* pbn_b0_bt_1_115200 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 }, /* pbn_b0_bt_2_115200 */ + { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 8, 115200 }, /* pbn_b0_bt_8_115200 */ { SPCI_FL_BASE0 | 
SPCI_FL_BASE_TABLE, 1, 460800 }, /* pbn_b0_bt_1_460800 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 460800 }, /* pbn_b0_bt_2_460800 */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 921600 }, /* pbn_b0_bt_2_921600 */ + { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 4, 460800 }, /* pbn_b0_bt_4_460800 */ { SPCI_FL_BASE1, 1, 115200 }, /* pbn_b1_1_115200 */ { SPCI_FL_BASE1, 2, 115200 }, /* pbn_b1_2_115200 */ @@ -4859,6 +4879,12 @@ { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_QUAD_B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_A, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, + { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_OCTO_B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_4_460800 }, { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_SSERIAL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_1_115200 }, @@ -4871,6 +4897,11 @@ PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_bt_2_115200 }, + /* AFAVLAB serial card, from Harald Welte */ + { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_P028, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_8_115200 }, + /* EKF addition for i960 Boards form EKF with serial port */ { PCI_VENDOR_ID_INTEL, 0x1960, 0xE4BF, PCI_ANY_ID, 0, 0, @@ -4890,6 +4921,7 @@ 0x1048, 0x1500, 0, 0, pbn_b1_1_115200 }, + /* SGI IOC3 board */ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, 0xFF00, 0, 0, 0, pbn_sgi_ioc3 }, @@ -5542,12 +5574,22 @@ tty_register_devfs(&callout_driver, 0, callout_driver.minor_start + state->line); } +#ifdef CONFIG_SERIAL_GSC + probe_serial_gsc(); +#endif +#ifdef CONFIG_SUPERIO + superio_serial_init(); +#endif #ifdef ENABLE_SERIAL_PCI probe_serial_pci(); #endif #ifdef ENABLE_SERIAL_PNP probe_serial_pnp(); #endif + // FIXME: Clean this one up +#if defined(CONFIG_SERIAL_CONSOLE) && defined(CONFIG_PARISC) + serial_console_init(); +#endif return 0; } @@ -5657,6 +5699,7 @@ info->io_type = req->io_type; info->iomem_base = req->iomem_base; info->iomem_reg_shift = req->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; } autoconfig(state); if (state->type == PORT_UNKNOWN) { @@ -5964,6 +6007,7 @@ info->io_type = state->io_type; info->iomem_base = state->iomem_base; info->iomem_reg_shift = state->iomem_reg_shift; + info->irq_spinlock= (spinlock_t) SPIN_LOCK_UNLOCKED; quot = state->baud_base / baud; cval = cflag & (CSIZE | CSTOPB); #if defined(__powerpc__) || defined(__alpha__) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/sonypi.c linux.21pre4-ac2/drivers/char/sonypi.c --- linux.21pre4/drivers/char/sonypi.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/sonypi.c 2003-01-30 16:53:21.000000000 +0000 @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -56,6 +57,7 @@ static int fnkeyinit; /* = 0 */ static int camera; /* = 0 */ static int compat; /* = 0 */ +static int useinput = 1; static unsigned long mask = 0xffffffff; /* Inits the queue */ @@ -335,6 +337,21 @@ return; found: +#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) + if (useinput) { + struct input_dev *jog_dev = &sonypi_device.jog_dev; + if (event == SONYPI_EVENT_JOGDIAL_PRESSED) + input_report_key(jog_dev, BTN_MIDDLE, 1); + else if (event == SONYPI_EVENT_ANYBUTTON_RELEASED) + input_report_key(jog_dev, BTN_MIDDLE, 0); + else if ((event == SONYPI_EVENT_JOGDIAL_UP) || + (event == SONYPI_EVENT_JOGDIAL_UP_PRESSED)) + input_report_rel(jog_dev, REL_WHEEL, 1); + else if ((event == SONYPI_EVENT_JOGDIAL_DOWN) || + (event == SONYPI_EVENT_JOGDIAL_DOWN_PRESSED)) + input_report_rel(jog_dev, REL_WHEEL, 
-1); + } +#endif /* CONFIG_INPUT || CONFIG_INPUT_MODULE */ sonypi_pushq(event); } @@ -579,7 +596,7 @@ -1, "sonypi", &sonypi_misc_fops }; -#if CONFIG_PM +#ifdef CONFIG_PM static int sonypi_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) { static int old_camera_power; @@ -594,14 +611,14 @@ sonypi_type2_dis(); else sonypi_type1_dis(); -#if !defined(CONFIG_ACPI) +#ifndef CONFIG_ACPI /* disable ACPI mode */ if (fnkeyinit) outb(0xf1, 0xb2); #endif break; case PM_RESUME: -#if !defined(CONFIG_ACPI) +#ifndef CONFIG_ACPI /* Enable ACPI mode to get Fn key events */ if (fnkeyinit) outb(0xf0, 0xb2); @@ -692,7 +709,7 @@ goto out3; } -#if !defined(CONFIG_ACPI) +#ifndef CONFIG_ACPI /* Enable ACPI mode to get Fn key events */ if (fnkeyinit) outb(0xf0, 0xb2); @@ -715,14 +732,15 @@ SONYPI_DRIVER_MINORVERSION); printk(KERN_INFO "sonypi: detected %s model, " "verbose = %d, fnkeyinit = %s, camera = %s, " - "compat = %s, mask = 0x%08lx\n", + "compat = %s, mask = 0x%08lx, useinput = %s\n", (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) ? "type1" : "type2", verbose, fnkeyinit ? "on" : "off", camera ? "on" : "off", compat ? "on" : "off", - mask); + mask, + useinput ? "on" : "off"); printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", sonypi_device.irq, sonypi_device.ioport1, sonypi_device.ioport2); @@ -730,7 +748,26 @@ printk(KERN_INFO "sonypi: device allocated minor is %d\n", sonypi_misc_device.minor); -#if CONFIG_PM +#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) + if (useinput) { + /* Initialize the Input Drivers: */ + sonypi_device.jog_dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL); + sonypi_device.jog_dev.keybit[LONG(BTN_MOUSE)] = BIT(BTN_MIDDLE); + sonypi_device.jog_dev.relbit[0] = BIT(REL_WHEEL); + sonypi_device.jog_dev.name = (char *) kmalloc( + sizeof(SONYPI_INPUTNAME), GFP_KERNEL); + sprintf(sonypi_device.jog_dev.name, SONYPI_INPUTNAME); + sonypi_device.jog_dev.idbus = BUS_ISA; + sonypi_device.jog_dev.idvendor = PCI_VENDOR_ID_SONY; + + input_register_device(&sonypi_device.jog_dev); + printk(KERN_INFO "%s installed at input event device #%d.\n", + sonypi_device.jog_dev.name, + sonypi_device.jog_dev.number); + } +#endif /* CONFIG_INPUT || CONFIG_INPUT_MODULE */ + +#ifdef CONFIG_PM sonypi_device.pm = pm_register(PM_PCI_DEV, 0, sonypi_pm_callback); #endif @@ -746,18 +783,26 @@ static void __devexit sonypi_remove(void) { -#if CONFIG_PM +#ifdef CONFIG_PM pm_unregister(sonypi_device.pm); #endif sonypi_call2(0x81, 0); /* make sure we don't get any more events */ + +#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) + if (useinput) { + input_unregister_device(&sonypi_device.jog_dev); + kfree(sonypi_device.jog_dev.name); + } +#endif /* CONFIG_INPUT || CONFIG_INPUT_MODULE */ + if (camera) sonypi_camera_off(); if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) sonypi_type2_dis(); else sonypi_type1_dis(); -#if !defined(CONFIG_ACPI) +#ifndef CONFIG_ACPI /* disable ACPI mode */ if (fnkeyinit) outb(0xf1, 0xb2); @@ -787,7 +832,7 @@ #ifndef MODULE static int __init sonypi_setup(char *str) { - int ints[7]; + int ints[8]; str = get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] <= 0) @@ -808,6 +853,9 @@ if (ints[0] == 5) goto out; mask = ints[6]; + if (ints[0] == 6) + goto out; + useinput = ints[7]; out: return 1; } @@ -836,5 +884,7 @@ MODULE_PARM_DESC(compat, "set this if you want to enable backward compatibility mode"); MODULE_PARM(mask, "i"); MODULE_PARM_DESC(mask, "set this to the mask of event you want to enable (see doc)"); +MODULE_PARM(useinput, "i"); 
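Since these hunks route jog dial activity through the input layer as BTN_MIDDLE key events and REL_WHEEL relative motion, a user-space consumer of the matching event device could look roughly like the sketch below. This is illustrative only; the /dev/input/event0 node is an assumption (the actual number depends on registration order) and error handling is omitted.

/* Illustrative sketch only: reads the jog dial events reported above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);	/* event node assumed */

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_REL && ev.code == REL_WHEEL)
			printf("jog dial turned %d\n", (int)ev.value);
		else if (ev.type == EV_KEY && ev.code == BTN_MIDDLE)
			printf("jog dial %s\n", ev.value ? "pressed" : "released");
	}
	close(fd);
	return 0;
}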
+MODULE_PARM_DESC(useinput, "if you have a jogdial, set this if you would like it to use the modern Linux Input Driver system"); EXPORT_SYMBOL(sonypi_camera_command); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/char/sonypi.h linux.21pre4-ac2/drivers/char/sonypi.h --- linux.21pre4/drivers/char/sonypi.h 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/char/sonypi.h 2003-02-04 15:54:22.000000000 +0000 @@ -37,7 +37,7 @@ #ifdef __KERNEL__ #define SONYPI_DRIVER_MAJORVERSION 1 -#define SONYPI_DRIVER_MINORVERSION 17 +#define SONYPI_DRIVER_MINORVERSION 18 #define SONYPI_DEVICE_MODEL_TYPE1 1 #define SONYPI_DEVICE_MODEL_TYPE2 2 @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include "linux/sonypi.h" @@ -334,6 +335,9 @@ unsigned char buf[SONYPI_BUF_SIZE]; }; +/* The name of the Jog Dial for the input device drivers */ +#define SONYPI_INPUTNAME "Sony VAIO Jog Dial" + struct sonypi_device { struct pci_dev *dev; u16 irq; @@ -347,7 +351,10 @@ struct sonypi_queue queue; int open_count; int model; -#if CONFIG_PM +#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) + struct input_dev jog_dev; +#endif +#ifdef CONFIG_PM struct pm_dev *pm; #endif }; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/hotplug/Config.in linux.21pre4-ac2/drivers/hotplug/Config.in --- linux.21pre4/drivers/hotplug/Config.in 2003-01-29 16:27:05.000000000 +0000 +++ linux.21pre4-ac2/drivers/hotplug/Config.in 2003-01-06 17:28:43.000000000 +0000 @@ -6,11 +6,11 @@ dep_tristate 'Support for PCI Hotplug (EXPERIMENTAL)' CONFIG_HOTPLUG_PCI $CONFIG_EXPERIMENTAL $CONFIG_PCI +dep_tristate ' ACPI PCI Hotplug driver' CONFIG_HOTPLUG_PCI_ACPI $CONFIG_ACPI $CONFIG_HOTPLUG_PCI dep_tristate ' Compaq PCI Hotplug driver' CONFIG_HOTPLUG_PCI_COMPAQ $CONFIG_HOTPLUG_PCI $CONFIG_X86 dep_mbool ' Save configuration into NVRAM on Compaq servers' CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM $CONFIG_HOTPLUG_PCI_COMPAQ if [ "$CONFIG_X86_IO_APIC" = "y" ]; then dep_tristate ' IBM PCI Hotplug driver' CONFIG_HOTPLUG_PCI_IBM $CONFIG_HOTPLUG_PCI $CONFIG_X86_IO_APIC $CONFIG_X86 fi -dep_tristate ' ACPI PCI Hotplug driver' CONFIG_HOTPLUG_PCI_ACPI $CONFIG_ACPI $CONFIG_HOTPLUG_PCI - +dep_tristate ' IBM Thinkpad (20H2999) Docking driver (VERY EXPERIMENTAL) ' CONFIG_HOTPLUG_PCI_H2999 $CONFIG_HOTPLUG_PCI $CONFIG_X86 endmenu diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/hotplug/Makefile linux.21pre4-ac2/drivers/hotplug/Makefile --- linux.21pre4/drivers/hotplug/Makefile 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/hotplug/Makefile 2003-01-06 17:28:43.000000000 +0000 @@ -12,6 +12,7 @@ obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o +obj-$(CONFIG_HOTPLUG_PCI_H2999) += tp600.o pci_hotplug-objs := pci_hotplug_core.o \ pci_hotplug_util.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/hotplug/tp600.c linux.21pre4-ac2/drivers/hotplug/tp600.c --- linux.21pre4/drivers/hotplug/tp600.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/hotplug/tp600.c 2003-01-06 17:28:43.000000000 +0000 @@ -0,0 +1,500 @@ +/* + * Drivers for the IBM 20H2999 found in the IBM thinkpad series + * machines. + * + * This driver was done without documentation from IBM + * + * _ + * { } + * | | All reverse engineering done + * | | in accordance with 92/250/EEC + * .-.! !.-. and Copyright (Computer Programs) + * .-! ! ! 
!.-. Regulations 1992 (S.I. 1992 No. 3233) + * ! ! ! ; + * \ ; + * \ ; + * ! : + * ! | + * | | + * + * + * Various other IBM's tried to obtain docs but failed. For that + * reason we only support warm not hot undocking at the moment. + * + * Known bugs: + * Sometimes we hang with an IRQ storm. I don't know what + * deals with the IRQ disables yet. (Hot dock) + * Sometimes busmastering (and maybe IRQs) don't come back + * (Seems to be a buffering issue for hot dock) + * + * Yet to do: + * ISA is not yet handled (oh god help us) + * Instead of saving/restoring pci devices we should + * re-enumerate that subtree so you can change devices + * (That also deals with stale save problems) + * We need to do a proper warm save/restore interface + * Bridged cards don't yet work + * + * Usage: + * Load module + * Pray + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pci_hotplug.h" +#include "tp600.h" + +static struct h2999_dev *testdev; + +/** + * pci_save_slot - save slot PCI data + * @slot: slot to save + * + * Save the slot data from a PCI device + */ + +static void pci_save_slot(struct h2999_slot *s) +{ + int i, n; + + for(i=0;i<8;i++) + { + struct pci_dev *p = pci_find_slot(s->dev->hotplug_bus, PCI_DEVFN(s->slotid, i)); + s->pci[i] = p; + if(p) + { + for(n = 0; n < 64; n++) + pci_read_config_dword(p, n * 4, &s->save[i][n]); +// printk("Saved %02X:%02X.%X\n", +// s->dev->hotplug_bus, s->slotid, i); + } + } +} + +static void pci_restore_slot(struct h2999_slot *s) +{ + int i,n; + + for(i = 0 ; i < 8; i++) + { + if(s->pci[i]) + { + pci_set_power_state(s->pci[i], 0); + + for(n = 0; n < 54; n++) + if(n!=1) + pci_write_config_dword(s->pci[i], n * 4, s->save[i][n]); + pci_write_config_dword(s->pci[i], 4, s->save[i][1]); +// printk("Restored %02X:%02X.%X\n", +// s->dev->hotplug_bus, s->slotid, i); + } + } +} + +/** + * slot_enable - enable H2999 slot + * @slot: slot to enable + * + * Enable a slot. Its not actually clear what this means with + * a hot dock. We can certainly 'discover' the PCI device in the + * slot when asked. + */ + +static int slot_enable(struct hotplug_slot *slot) +{ + struct h2999_slot *s = slot->private; + int i; + pci_restore_slot(s); + for(i=0; i < 8; i++) + { + if(s->pci[i] && (s->drivermap&(1<pci[i]); + } + return 0; +} + +/** + * slot_disable - disable H2999 slot + * @slot: slot to disable + * + * Disable a slot. Its not actually clear what to do here. We could + * report the device as having been removed when we are told to do + * this. + */ + +static int slot_disable(struct hotplug_slot *slot) +{ + struct h2999_slot *s = slot->private; + struct pci_dev *pdev; + int i; + + for(i = 0; i < 8; i++) + { + pdev = s->pci[i]; + /* Hack for now */ + if (pdev && pdev->driver) { + if (!pdev->driver->remove) + return -EBUSY; + } + } + + s->drivermap = 0; + + for(i = 0; i < 8; i++) + { + pdev = s->pci[i]; + if(pdev) + { + if(pdev->driver) + { + s->drivermap|=(1<driver->remove(pdev); + pdev->driver = NULL; + } + } + } + return 0; +} + +/** + * set_attention_status - set attention callback + * @slot: slot to set + * @value: on/off + * + * Called when the hotplug layer wants to set the attention status of + * the hotplug slot. The H2999 doesn't have an attention control (at + * least not that we know of). So we ignore this. 
+ */ + +static int set_attention_status(struct hotplug_slot *slot, u8 value) +{ + return 0; +} + +/** + * hardware_test - test hardware callback + * @slot: slot to test + * value: test to run + * + * The H2999 does not support any hardware tests that we know of. + */ + +static int hardware_test(struct hotplug_slot *slot, u32 value) +{ + return 0; +} + +/** + * get_power_status - power query callback + * @slot; slot to query + * @value: returned state + * + * Called when the hotplug layer wants to ask us if the slot is + * powered. We work on the basis that all slots are powered when + * the unit is docked. This seems to be correct but I've not actually + * rammed a voltmeter into the slots to see if they are cleverer than + * that. + */ + +static int get_power_status(struct hotplug_slot *slot, u8 *value) +{ + struct h2999_slot *s = slot->private; + + /* Slots are all powered when docked */ + if(s->dev->docked > 0) + *value = 1; + else + *value = 0; + return 0; +} + +/** + * get_adapter_status - card presence query + * @slot: slot to query + * @value: returned state + * + * If we are not docked, we know the "slot" is empty. If we are + * docked its a bit more complicated. + */ + +static int get_adapter_status(struct hotplug_slot *slot, u8 *value) +{ + struct h2999_slot *s = slot->private; + + *value = 0; + + if(s->dev->docked) + *value = 1; + return 0; +} + +static struct hotplug_slot_ops h2999_ops = { + THIS_MODULE, + slot_enable, + slot_disable, + set_attention_status, + hardware_test, + get_power_status, + NULL, + NULL, + get_adapter_status +}; + +/** + * h2999_is_docked - check if docked + * @dev: h2999 device + * + * Check if we are currently docked. The method we use at the moment + * relies on poking around behind the bridge. There is no doubt a + * correct way to do this. Maybe one day IBM will be decide to + * actually provide documentation + */ + +static int h2999_is_docked(struct h2999_dev *dev) +{ + struct pci_dev *pdev = pci_find_slot(dev->hotplug_bus, PCI_DEVFN(0,0)); + u32 status; + + if(pdev == NULL) + return 0; /* Shouldnt happen - must be undocked */ + + if(pci_read_config_dword(pdev, PCI_VENDOR_ID, &status)) + return 0; /* Config read failed - its missing */ + + if(status == 0xFFFFFFFFUL) /* Failed */ + return 0; + + /* Must be docked */ + return 1; +} +/** + * h2999_reconfigure_dock - redock event handler + * @dev: h2999 device + * + * A redocking event has occurred. There may also have been an undock + * before hand. If so then the unconfigure routine is called first. + */ + +static void h2999_reconfigure_dock(struct h2999_dev *dev) +{ + int docked, i; + + docked = h2999_is_docked(dev); + + if(docked ^ dev->docked) + { + printk("h2999: Now %sdocked.\n", + docked?"":"un"); + if(docked) + { + /* We should do the re-enumeration of the bus here + The current save/restore is a test hack */ + for(i=0; i < H2999_SLOTS; i++) + { + if(dev->slots[i].hotplug_slot.private != &dev->slots[i]) + BUG(); + slot_enable(&dev->slots[i].hotplug_slot); + } + } + else + { + for(i=0; i < H2999_SLOTS; i++) + { + if(slot_disable(&dev->slots[i].hotplug_slot)) + printk(KERN_ERR "h2999: someone undocked while devices were in use, how rude!\n"); + } + } + dev->docked = docked; + } + /* Clear bits */ + pci_write_config_byte(dev->pdev, 0x1f, 0xf0); +} + +/* + * h2999_attach - attach an H2999 bridge + * @pdev: PCI device + * @unused: unused + * + * Called when the PCI layer discovers an H2999 docking bridge is + * present in the system. 
We scan the bridge to obtain its current + * status and register it with the hot plug layer + */ + +static int __devinit h2999_attach(struct pci_dev *pdev, const struct pci_device_id *unused) +{ + /* PCI core found a new H2999 */ + struct h2999_dev *dev; + u8 bus; + int i; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if(dev == NULL) + goto nomem; + + memset(dev, 0, sizeof(*dev)); + + dev->pdev = pdev; + + pci_read_config_byte(pdev, PCI_SECONDARY_BUS, &bus); + dev->hotplug_bus = bus; + + /* Requires hotplug_bus and pdev are set */ + + dev->docked = h2999_is_docked(dev); + + printk(KERN_INFO "Found IBM 20H2999. Status is %sdocked, docking bus is %d.\n", + dev->docked?"":"un", dev->hotplug_bus); + + /* + * Allow for 8 devices. On the TP600 at least we have + * 0-3 as the onboard devices, and 4-7 as the slots. + * To add more fun there is an ISA bridge which we + * don't really handle yet. + */ + + for(i = 0; i < H2999_SLOTS; i++) + { + struct h2999_slot *s = &dev->slots[i]; + int ret; + + s->hotplug_slot.info = &s->hotplug_info; + s->hotplug_slot.private = s; + s->slotid = i; + s->dev = dev; + s->live = 1; + s->hotplug_slot.ops = &h2999_ops; + s->hotplug_slot.name = s->name; + s->hotplug_info.power_status = dev->docked; + /* FIXME - should probe here + In truth the hp_register ought to call thse as needed! */ + s->hotplug_info.adapter_status = 0; + s->pdev = pci_find_slot(dev->hotplug_bus, PCI_DEVFN(i, 0)); + snprintf(s->name, SLOT_NAME_SIZE, "Dock%d.%d", dev->hotplug_bus, i); + pci_save_slot(s); + ret = pci_hp_register(&s->hotplug_slot); + if(ret) + { + printk(KERN_ERR "pci_hp_register failed for slot %d with error %d\n", i, ret); + s->live = 0; + } + } + pci_set_drvdata(pdev, dev); + + testdev = dev; + return 0; +nomem: + printk(KERN_ERR "h2999_attach: out of memory.\n"); + return -ENOMEM; +} + +/** + * h2999_cleanup - free H2999 memory resources + * @dev: h2999 device + * + * Unregister and free up all of our slots + */ + +static int __devinit h2999_cleanup(struct h2999_dev *dev) +{ + struct h2999_slot *s; + int slot; + + for(slot = 0; slot < H2999_SLOTS; slot++) + { + s = &dev->slots[slot]; + if(s->live) + pci_hp_deregister(&s->hotplug_slot); + } + kfree(dev); +} + +/** + * h2999_detach - an H2999 controller vanished + * @dev: device that vanished + * + * Called when the PCI layer sees the bridge unplugged. 
At the moment + * this doesn't happen and since its currently unclear what to do + * in the hot plug layer if it does this may be a good thing 8) + */ + +static void __devinit h2999_detach(struct pci_dev *pdev) +{ + struct h2999_dev *dev = pci_get_drvdata(pdev); + h2999_cleanup(dev); +} + + +static struct pci_device_id h2999_id_tbl[] __devinitdata = { + { PCI_VENDOR_ID_IBM, 0x0095, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, h2999_id_tbl); + +static struct pci_driver h2999_driver = { + name: "h2999", + id_table: h2999_id_tbl, + probe: h2999_attach, + remove: __devexit_p(h2999_detach) + /* FIXME - PM functions */ +}; + +/* + * Test harness + */ + +static struct completion thread_done; + +static int h2999_thread(void *unused) +{ + lock_kernel(); + while(testdev != NULL) + { + set_current_state(TASK_INTERRUPTIBLE); + if(signal_pending(current)) + break; + schedule_timeout(HZ); + h2999_reconfigure_dock(testdev); + } + unlock_kernel(); + complete_and_exit(&thread_done, 0); +} + +static int __init h2999_init_module(void) +{ + int rc; + printk(KERN_INFO "IBM 20H2999 PCI docking bridge driver v0.01\n"); + + init_completion(&thread_done); + + rc = pci_module_init(&h2999_driver); + if (rc == 0) + { + if( kernel_thread(h2999_thread, NULL, CLONE_SIGHAND) >= 0) + return 0; + } + complete(&thread_done); + return rc; +} + +static void __exit h2999_cleanup_module(void) +{ + pci_unregister_driver(&h2999_driver); + wait_for_completion(&thread_done); +} + +module_init(h2999_init_module); +module_exit(h2999_cleanup_module); + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("IBM 20H2999 Docking Bridge Driver"); +MODULE_LICENSE("GPL"); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/hotplug/tp600.h linux.21pre4-ac2/drivers/hotplug/tp600.h --- linux.21pre4/drivers/hotplug/tp600.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/hotplug/tp600.h 2003-01-06 17:28:43.000000000 +0000 @@ -0,0 +1,29 @@ +#define SLOT_NAME_SIZE 12 + +struct h2999_slot +{ + int slotid; + + struct hotplug_slot hotplug_slot; + struct hotplug_slot_info hotplug_info; + char name[SLOT_NAME_SIZE]; + + struct h2999_dev *dev; + struct pci_dev *pdev; + int live; + + struct pci_dev *pci[8]; + u32 save[8][64]; + u8 drivermap; +}; + +#define H2999_SLOTS 8 + +struct h2999_dev +{ + int docked; + int hotplug_bus; + struct pci_dev *pdev; + + struct h2999_slot slots[H2999_SLOTS]; +}; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/arm/icside.c linux.21pre4-ac2/drivers/ide/arm/icside.c --- linux.21pre4/drivers/ide/arm/icside.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/arm/icside.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/icside.c + * linux/drivers/ide/arm/icside.c * * Copyright (c) 1996,1997 Russell King. * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/arm/rapide.c linux.21pre4-ac2/drivers/ide/arm/rapide.c --- linux.21pre4/drivers/ide/arm/rapide.c 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/arm/rapide.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/rapide.c + * linux/drivers/ide/arm/rapide.c * * Copyright (c) 1996-1998 Russell King. 
* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/Config.in linux.21pre4-ac2/drivers/ide/Config.in --- linux.21pre4/drivers/ide/Config.in 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/Config.in 2003-01-12 00:31:39.000000000 +0000 @@ -56,7 +56,7 @@ dep_tristate ' Cyrix CS5530 MediaGX chipset support' CONFIG_BLK_DEV_CS5530 $CONFIG_BLK_DEV_IDEDMA_PCI dep_tristate ' HPT34X chipset support' CONFIG_BLK_DEV_HPT34X $CONFIG_BLK_DEV_IDEDMA_PCI dep_mbool ' HPT34X AUTODMA support (WIP)' CONFIG_HPT34X_AUTODMA $CONFIG_BLK_DEV_HPT34X $CONFIG_IDEDMA_PCI_WIP - dep_tristate ' HPT366/368/370 chipset support' CONFIG_BLK_DEV_HPT366 $CONFIG_BLK_DEV_IDEDMA_PCI + dep_tristate ' HPT36X/37X chipset support' CONFIG_BLK_DEV_HPT366 $CONFIG_BLK_DEV_IDEDMA_PCI dep_tristate ' Intel PIIXn chipsets support' CONFIG_BLK_DEV_PIIX $CONFIG_BLK_DEV_IDEDMA_PCI if [ "$CONFIG_MIPS_ITE8172" = "y" -o "$CONFIG_MIPS_IVR" = "y" ]; then dep_mbool ' IT8172 IDE support' CONFIG_BLK_DEV_IT8172 $CONFIG_BLK_DEV_IDEDMA_PCI diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide.c linux.21pre4-ac2/drivers/ide/ide.c --- linux.21pre4/drivers/ide/ide.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide.c 2003-01-31 17:55:01.000000000 +0000 @@ -863,6 +863,7 @@ hwif->iops = old_hwif.iops; #else hwif->OUTB = old_hwif.OUTB; + hwif->OUTBSYNC = old_hwif.OUTBSYNC; hwif->OUTW = old_hwif.OUTW; hwif->OUTL = old_hwif.OUTL; hwif->OUTSW = old_hwif.OUTSW; @@ -1021,10 +1022,45 @@ EXPORT_SYMBOL(ide_register); -void ide_add_setting (ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set) + +/* + * Locks for IDE setting functionality + */ + +DECLARE_MUTEX(ide_setting_sem); +EXPORT_SYMBOL(ide_setting_sem); + +/** + * ide_add_setting - add an ide setting option + * @drive: drive to use + * @name: setting name + * @rw: true if the function is read write + * @read_ioctl: function to call on read + * @write_ioctl: function to call on write + * @data_type: type of data + * @min: range minimum + * @max: range maximum + * @mul_factor: multiplication scale + * @div_factor: divison scale + * @data: private data field + * @set: setting + * + * Removes the setting named from the device if it is present. + * The function takes the settings_lock to protect against + * parallel changes. This function must not be called from IRQ + * context. Returns 0 on success or -1 on failure. + * + * BUGS: This code is seriously over-engineered. There is also + * magic about how the driver specific features are setup. If + * a driver is attached we assume the driver settings are auto + * remove. 
+ */ + +int ide_add_setting (ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set) { ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL; + down(&ide_setting_sem); while ((*p) && strcmp((*p)->name, name) < 0) p = &((*p)->next); if ((setting = kmalloc(sizeof(*setting), GFP_KERNEL)) == NULL) @@ -1043,33 +1079,67 @@ setting->div_factor = div_factor; setting->data = data; setting->set = set; + setting->next = *p; if (drive->driver) setting->auto_remove = 1; *p = setting; - return; + up(&ide_setting_sem); + return 0; abort: + up(&ide_setting_sem); if (setting) kfree(setting); + return -1; } EXPORT_SYMBOL(ide_add_setting); +/** + * ide_remove_setting - remove an ide setting option + * @drive: drive to use + * @name: setting name + * + * Removes the setting named from the device if it is present. + * The function takes the settings_lock to protect against + * parallel changes. This function must not be called from IRQ + * context. + */ + void ide_remove_setting (ide_drive_t *drive, char *name) { - ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting; + ide_settings_t **p, *setting; + + down(&ide_setting_sem); + + p = (ide_settings_t **) &drive->settings; while ((*p) && strcmp((*p)->name, name)) p = &((*p)->next); if ((setting = (*p)) == NULL) + { + up(&ide_setting_sem); return; + } (*p) = setting->next; + + up(&ide_setting_sem); kfree(setting->name); kfree(setting); } EXPORT_SYMBOL(ide_remove_setting); +/** + * ide_find_setting_by_ioctl - find a drive specific ioctl + * @drive: drive to scan + * @cmd: ioctl command to handle + * + * Scan's the device setting table for a matching entry and returns + * this or NULL if no entry is found. The caller must hold the + * setting semaphore + */ + static ide_settings_t *ide_find_setting_by_ioctl (ide_drive_t *drive, int cmd) { ide_settings_t *setting = drive->settings; @@ -1079,9 +1149,20 @@ break; setting = setting->next; } + return setting; } +/** + * ide_find_setting_by_name - find a drive specific setting + * @drive: drive to scan + * @name: setting name + * + * Scan's the device setting table for a matching entry and returns + * this or NULL if no entry is found. The caller must hold the + * setting semaphore + */ + ide_settings_t *ide_find_setting_by_name (ide_drive_t *drive, char *name) { ide_settings_t *setting = drive->settings; @@ -1094,9 +1175,19 @@ return setting; } +/** + * auto_remove_settings - remove driver specific settings + * @drive: drive + * + * Automatically remove all the driver specific settings for this + * drive. This function may sleep and must not be called from IRQ + * context. Takes the settings_lock + */ + static void auto_remove_settings (ide_drive_t *drive) { ide_settings_t *setting; + down(&ide_setting_sem); repeat: setting = drive->settings; while (setting) { @@ -1106,8 +1197,22 @@ } setting = setting->next; } + up(&ide_setting_sem); } +/** + * ide_read_setting - read an IDE setting + * @drive: drive to read from + * @setting: drive setting + * + * Read a drive setting and return the value. The caller + * must hold the ide_setting_sem when making this call. 
+ * + * BUGS: the data return and error are the same return value + * so an error -EINVAL and true return of the same value cannot + * be told apart + */ + int ide_read_setting (ide_drive_t *drive, ide_settings_t *setting) { int val = -EINVAL; @@ -1156,10 +1261,22 @@ EXPORT_SYMBOL(ide_spin_wait_hwgroup); -/* - * FIXME: This should be changed to enqueue a special request - * to the driver to change settings, and then wait on a sema for completion. - * The current scheme of polling is kludgey, though safe enough. +/** + * ide_write_setting - write an IDE setting + * @drive: drive to write to + * @setting: drive setting + * @val: value + * + * Write a drive setting if it is possible. The caller + * must hold the ide_setting_sem when making this call. 
*/ #include "legacy/pdc4030.h" @@ -467,12 +469,12 @@ #endif /* CONFIG_BLK_DEV_IDEDMA */ if (HWGROUP(drive)->handler != NULL) BUG(); - ide_set_handler(drive, &read_intr, WAIT_CMD, NULL); command = ((drive->mult_count) ? ((lba48) ? WIN_MULTREAD_EXT : WIN_MULTREAD) : ((lba48) ? WIN_READ_EXT : WIN_READ)); - hwif->OUTB(command, IDE_COMMAND_REG); + + ide_execute_command(drive, command, &read_intr, WAIT_CMD, NULL); return ide_started; } else if (rq_data_dir(rq) == WRITE) { ide_startstop_t startstop; @@ -716,6 +718,7 @@ MOD_INC_USE_COUNT; if (drive->removable && drive->usage == 1) { ide_task_t args; + int cf; memset(&args, 0, sizeof(ide_task_t)); args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORLOCK; args.command_type = ide_cmd_type_parser(&args); @@ -727,12 +730,38 @@ */ if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL)) drive->doorlocking = 0; + drive->wcache = 0; + /* Cache enabled ? */ + if (drive->id->csfo & 1) + drive->wcache = 1; + /* Cache command set available ? */ + if (drive->id->cfs_enable_1 & (1<<5)) + drive->wcache = 1; + /* ATA6 cache extended commands */ + cf = drive->id->command_set_2 >> 24; + if((cf & 0xC0) == 0x40 && (cf & 0x30) != 0) + drive->wcache = 1; } return 0; } static int do_idedisk_flushcache(ide_drive_t *drive); +static int ide_cacheflush_p(ide_drive_t *drive) +{ + if(drive->wcache) + { + if (do_idedisk_flushcache(drive)) + { + printk (KERN_INFO "%s: Write Cache FAILED Flushing!\n", + drive->name); + return -EIO; + } + return 1; + } + return 0; +} + static void idedisk_release (struct inode *inode, struct file *filp, ide_drive_t *drive) { if (drive->removable && !drive->usage) { @@ -744,10 +773,7 @@ if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL)) drive->doorlocking = 0; } - if ((drive->id->cfs_enable_2 & 0x3000) && drive->wcache) - if (do_idedisk_flushcache(drive)) - printk (KERN_INFO "%s: Write Cache FAILED Flushing!\n", - drive->name); + ide_cacheflush_p(drive); MOD_DEC_USE_COUNT; } @@ -1435,7 +1461,7 @@ { if (drive->suspend_reset) return 1; - + ide_cacheflush_p(drive); return call_idedisk_suspend(drive, 0); } @@ -1679,10 +1705,7 @@ static int idedisk_cleanup(ide_drive_t *drive) { - if ((drive->id->cfs_enable_2 & 0x3000) && drive->wcache) - if (do_idedisk_flushcache(drive)) - printk (KERN_INFO "%s: Write Cache FAILED Flushing!\n", - drive->name); + ide_cacheflush_p(drive); return ide_unregister_subdriver(drive); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-dma.c linux.21pre4-ac2/drivers/ide/ide-dma.c --- linux.21pre4/drivers/ide/ide-dma.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-dma.c 2003-02-04 15:19:26.000000000 +0000 @@ -257,17 +257,7 @@ bh = rq->bh; do { - int contig = 0; - - if (bh->b_page) { - if (bh_phys(bh) == lastdataend) - contig = 1; - } else { - if ((unsigned long) bh->b_data == lastdataend) - contig = 1; - } - - if (contig) { + if (bh_phys(bh) == lastdataend) { sg[nents - 1].length += bh->b_size; lastdataend += bh->b_size; continue; @@ -277,20 +267,18 @@ return 0; memset(&sg[nents], 0, sizeof(*sg)); - if (bh->b_page) { sg[nents].page = bh->b_page; sg[nents].offset = bh_offset(bh); - lastdataend = bh_phys(bh) + bh->b_size; } else { - if ((unsigned long) bh->b_data < PAGE_SIZE) + if (((unsigned long) bh->b_data) < PAGE_SIZE) BUG(); sg[nents].address = bh->b_data; - lastdataend = (unsigned long) bh->b_data + bh->b_size; } sg[nents].length = bh->b_size; + lastdataend = bh_phys(bh) + bh->b_size; nents++; } while ((bh = bh->b_reqnext) != NULL); @@ -689,11 
+677,6 @@ drive->waiting_for_dma = 1; if (drive->media != ide_disk) return 0; - /* paranoia check */ - if (HWGROUP(drive)->handler != NULL) - BUG(); - ide_set_handler(drive, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry); - /* * FIX ME to use only ACB ide_task_t args Struct */ @@ -710,8 +693,7 @@ } #endif /* issue cmd to drive */ - hwif->OUTB(command, IDE_COMMAND_REG); - + ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry); return HWIF(drive)->ide_dma_count(drive); } @@ -741,10 +723,6 @@ drive->waiting_for_dma = 1; if (drive->media != ide_disk) return 0; - /* paranoia check */ - if (HWGROUP(drive)->handler != NULL) - BUG(); - ide_set_handler(drive, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry); /* * FIX ME to use only ACB ide_task_t args Struct */ @@ -761,7 +739,7 @@ } #endif /* issue cmd to drive */ - hwif->OUTB(command, IDE_COMMAND_REG); + ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry); return HWIF(drive)->ide_dma_count(drive); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-floppy.c linux.21pre4-ac2/drivers/ide/ide-floppy.c --- linux.21pre4/drivers/ide/ide-floppy.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-floppy.c 2003-02-04 15:21:39.000000000 +0000 @@ -1123,14 +1123,11 @@ } if (test_bit(IDEFLOPPY_DRQ_INTERRUPT, &floppy->flags)) { - if (HWGROUP(drive)->handler != NULL) - BUG(); - ide_set_handler(drive, + /* Issue the packet command */ + ide_execute_command(drive, WIN_PACKETCMD, pkt_xfer_routine, IDEFLOPPY_WAIT_CMD, NULL); - /* Issue the packet command */ - HWIF(drive)->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG); return ide_started; } else { /* Issue the packet command */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-io.c linux.21pre4-ac2/drivers/ide/ide-io.c --- linux.21pre4/drivers/ide/ide-io.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-io.c 2003-02-04 15:24:00.000000000 +0000 @@ -363,14 +363,11 @@ void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, ide_handler_t *handler) { ide_hwif_t *hwif = HWIF(drive); - if (HWGROUP(drive)->handler != NULL) - BUG(); - ide_set_handler(drive, handler, WAIT_CMD, NULL); if (IDE_CONTROL_REG) hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */ SELECT_MASK(drive,0); hwif->OUTB(nsect,IDE_NSECTOR_REG); - hwif->OUTB(cmd,IDE_COMMAND_REG); + ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL); } EXPORT_SYMBOL(ide_cmd); @@ -833,14 +830,14 @@ * happens anyway when any interrupt comes in, IDE or otherwise * -- the kernel masks the IRQ while it is being handled. 
*/ - if (masked_irq && hwif->irq != masked_irq) + if (hwif->irq != masked_irq) disable_irq_nosync(hwif->irq); spin_unlock(&io_request_lock); local_irq_enable(); /* allow other IRQs while we start this request */ startstop = start_request(drive, rq); spin_lock_irq(&io_request_lock); - if (masked_irq && hwif->irq != masked_irq) + if (hwif->irq != masked_irq) enable_irq(hwif->irq); if (startstop == ide_stopped) hwgroup->busy = 0; @@ -866,7 +863,7 @@ */ void do_ide_request(request_queue_t *q) { - ide_do_request(q->queuedata, 0); + ide_do_request(q->queuedata, IDE_NO_IRQ); } /* @@ -1014,7 +1011,7 @@ hwgroup->busy = 0; } } - ide_do_request(hwgroup, 0); + ide_do_request(hwgroup, IDE_NO_IRQ); spin_unlock_irqrestore(&io_request_lock, flags); } @@ -1319,7 +1316,7 @@ queue_head = queue_head->next; } list_add(&rq->queue, queue_head); - ide_do_request(hwgroup, 0); + ide_do_request(hwgroup, IDE_NO_IRQ); spin_unlock_irqrestore(&io_request_lock, flags); if (action == ide_wait) { /* wait for it to be serviced */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-iops.c linux.21pre4-ac2/drivers/ide/ide-iops.c --- linux.21pre4/drivers/ide/ide-iops.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-iops.c 2003-02-04 15:24:11.000000000 +0000 @@ -31,6 +31,74 @@ #include #include +/* + * IDE operator we assign to an unplugged device so that + * we don't trash new hardware assigned the same resources + */ + +static u8 ide_unplugged_inb (unsigned long port) +{ + return 0xFF; +} + +static u16 ide_unplugged_inw (unsigned long port) +{ + return 0xFFFF; +} + +static void ide_unplugged_insw (unsigned long port, void *addr, u32 count) +{ +} + +static u32 ide_unplugged_inl (unsigned long port) +{ + return 0xFFFFFFFF; +} + +static void ide_unplugged_insl (unsigned long port, void *addr, u32 count) +{ +} + +static void ide_unplugged_outb (u8 addr, unsigned long port) +{ +} + +static void ide_unplugged_outw (u16 addr, unsigned long port) +{ +} + +static void ide_unplugged_outsw (unsigned long port, void *addr, u32 count) +{ +} + +static void ide_unplugged_outl (u32 addr, unsigned long port) +{ +} + +static void ide_unplugged_outsl (unsigned long port, void *addr, u32 count) +{ +} + +void unplugged_hwif_iops (ide_hwif_t *hwif) +{ + hwif->OUTB = ide_unplugged_outb; + hwif->OUTBSYNC = ide_unplugged_outb; + hwif->OUTW = ide_unplugged_outw; + hwif->OUTL = ide_unplugged_outl; + hwif->OUTSW = ide_unplugged_outsw; + hwif->OUTSL = ide_unplugged_outsl; + hwif->INB = ide_unplugged_inb; + hwif->INW = ide_unplugged_inw; + hwif->INL = ide_unplugged_inl; + hwif->INSW = ide_unplugged_insw; + hwif->INSL = ide_unplugged_insl; +} + +EXPORT_SYMBOL(unplugged_hwif_iops); + +/* + * Conventional PIO operations for ATA devices + */ static u8 ide_inb (unsigned long port) { @@ -85,6 +153,7 @@ void default_hwif_iops (ide_hwif_t *hwif) { hwif->OUTB = ide_outb; + hwif->OUTBSYNC = ide_outb; hwif->OUTW = ide_outw; hwif->OUTL = ide_outl; hwif->OUTSW = ide_outsw; @@ -98,6 +167,10 @@ EXPORT_SYMBOL(default_hwif_iops); +/* + * MMIO operations, typically used for SATA controllers + */ + static u8 ide_mm_inb (unsigned long port) { return (u8) readb(port); @@ -151,6 +224,9 @@ void default_hwif_mmiops (ide_hwif_t *hwif) { hwif->OUTB = ide_mm_outb; + /* Most systems will need to override OUTBSYNC, alas however + this one is controller specific! 
*/ + hwif->OUTBSYNC = ide_mm_outb; hwif->OUTW = ide_mm_outw; hwif->OUTL = ide_mm_outl; hwif->OUTSW = ide_mm_outsw; @@ -872,7 +948,7 @@ if (speed >= XFER_SW_DMA_0) hwif->ide_dma_host_on(drive); else - hwif->ide_dma_off(drive); + hwif->ide_dma_off_quietly(drive); #endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */ switch(speed) { @@ -907,6 +983,8 @@ * at the appropriate code to handle the next interrupt, and a * timer is started to prevent us from waiting forever in case * something goes wrong (see the ide_timer_expiry() handler later on). + * + * See also ide_execute_command */ void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry) @@ -916,7 +994,7 @@ spin_lock_irqsave(&io_request_lock, flags); if (hwgroup->handler != NULL) { - printk("%s: ide_set_handler: handler not null; " + printk(KERN_CRIT "%s: ide_set_handler: handler not null; " "old=%p, new=%p\n", drive->name, hwgroup->handler, handler); } @@ -929,6 +1007,46 @@ EXPORT_SYMBOL(ide_set_handler); +/** + * ide_execute_command - execute an IDE command + * @drive: IDE drive to issue the command against + * @command: command byte to write + * @handler: handler for next phase + * @timeout: timeout for command + * @expiry: handler to run on timeout + * + * Helper function to issue an IDE command. This handles the + * atomicity requirements, command timing and ensures that the + * handler and IRQ setup do not race. All IDE command kick off + * should go via this function or do equivalent locking. + */ + +void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry) +{ + unsigned long flags; + ide_hwgroup_t *hwgroup = HWGROUP(drive); + ide_hwif_t *hwif = HWIF(drive); + + spin_lock_irqsave(&io_request_lock, flags); + + if(hwgroup->handler) + BUG(); + hwgroup->handler = handler; + hwgroup->expiry = expiry; + hwgroup->timer.expires = jiffies + timeout; + add_timer(&hwgroup->timer); + hwif->OUTBSYNC(cmd, IDE_COMMAND_REG); + /* Drive takes 400nS to respond, we must avoid the IRQ being + serviced before that. + + FIXME: we could skip this delay with care on non shared + devices + */ + ndelay(400); + spin_unlock_irqrestore(&io_request_lock, flags); +} + +EXPORT_SYMBOL(ide_execute_command); /* needed below */ ide_startstop_t do_reset1 (ide_drive_t *, int); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-probe.c linux.21pre4-ac2/drivers/ide/ide-probe.c --- linux.21pre4/drivers/ide/ide-probe.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-probe.c 2003-02-04 14:57:08.000000000 +0000 @@ -590,6 +590,72 @@ //EXPORT_SYMBOL(hwif_register); +/* Enable code below on all archs later, for now, I want it on PPC + */ +#ifdef CONFIG_PPC +/* + * This function waits for the hwif to report a non-busy status + * see comments in probe_hwif() + */ +static int wait_not_busy(ide_hwif_t *hwif, unsigned long timeout) +{ + u8 stat = 0; + + while(timeout--) { + /* Turn this into a schedule() sleep once I'm sure + * about locking issues (2.5 work ?) + */ + mdelay(1); + stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); + if ((stat & BUSY_STAT) == 0) + break; + /* Assume a value of 0xff means nothing is connected to + * the interface and it doesn't implement the pull-down + * resistor on D7 + */ + if (stat == 0xff) + break; + } + return ((stat & BUSY_STAT) == 0) ? 
0 : -EBUSY; +} + +static int wait_hwif_ready(ide_hwif_t *hwif) +{ + int rc; + + printk("Probing IDE interface %s...\n", hwif->name); + + /* Let HW settle down a bit from whatever init state we + * come from */ + mdelay(2); + + /* Wait for BSY bit to go away, spec timeout is 30 seconds, + * I know of at least one disk that takes 31 seconds, I use 35 + * here to be safe + */ + rc = wait_not_busy(hwif, 35000); + if (rc) + return rc; + + /* Now make sure both master & slave are ready */ + SELECT_DRIVE(&hwif->drives[0]); + hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); + mdelay(2); + rc = wait_not_busy(hwif, 10000); + if (rc) + return rc; + SELECT_DRIVE(&hwif->drives[1]); + hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); + mdelay(2); + rc = wait_not_busy(hwif, 10000); + + /* Exit function with master reselected (let's be sane) */ + SELECT_DRIVE(&hwif->drives[0]); + + return rc; +} +#endif /* CONFIG_PPC */ + /* * This routine only knows how to look for drive units 0 and 1 * on an interface, so any setting of MAX_DRIVES > 2 won't work here. @@ -638,6 +704,31 @@ disable_irq(hwif->irq); local_irq_set(flags); + +#ifdef CONFIG_PPC + /* This is needed on some PPCs and a bunch of BIOS-less embedded + * platforms. Typical cases are: + * + * - The firmware hard reset the disk before booting the kernel, + * the drive is still doing its poweron-reset sequence, that + * can take up to 30 seconds + * - The firmware does nothing (or no firmware), the device is + * still in POST state (same as above actually). + * - Some CD/DVD/Writer combo drives tend to drive the bus during + * their reset sequence even when they are non-selected slave + * devices, thus preventing discovery of the main HD + * + * Doing this wait-for-busy should not harm any existing configuration + * (at least things won't be worse than what current code does, that + * is blindly go & talk to the drive) and fixes some issues like the + * above. + * + * BenH. + */ + if (wait_hwif_ready(hwif)) + printk(KERN_WARNING "%s: Wait for ready failed before probe !\n", hwif->name); +#endif /* CONFIG_PPC */ + /* * Second drive should only exist if first drive was found, * but a lot of cdrom drives are configured as single slaves. 
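The recurring change in the IDE hunks above and below (ide-cd.c, ide-disk.c, ide-dma.c, ide-floppy.c, ide-tape.c, ide-taskfile.c) is the switch from the old two-step ide_set_handler() plus OUTB(command) sequence to the new ide_execute_command() helper added in ide-iops.c, which installs the handler, arms the timeout, writes the command through OUTBSYNC and delays the 400ns the drive needs, all under io_request_lock so an early interrupt cannot race the setup. A minimal caller-side sketch of that conversion is below; only ide_execute_command() itself comes from this patch, the demo_* names are hypothetical stand-ins for a real driver's routines:

#include <linux/ide.h>	/* assumed 2.4 IDE driver context */

/* Hypothetical completion handler; a real driver would move data here. */
static ide_startstop_t demo_intr (ide_drive_t *drive)
{
	return ide_stopped;
}

static ide_startstop_t demo_issue (ide_drive_t *drive, u8 command)
{
	/*
	 * Old pattern (racy): the handler was installed first and the
	 * command byte written afterwards, with nothing preventing the
	 * IRQ from being serviced in between:
	 *
	 *	ide_set_handler(drive, &demo_intr, WAIT_CMD, NULL);
	 *	HWIF(drive)->OUTB(command, IDE_COMMAND_REG);
	 *
	 * New pattern: one call sets the handler, starts the timer,
	 * issues the command via OUTBSYNC and waits 400ns, all while
	 * holding io_request_lock.
	 */
	ide_execute_command(drive, command, &demo_intr, WAIT_CMD, NULL);
	return ide_started;
}
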
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-proc.c linux.21pre4-ac2/drivers/ide/ide-proc.c --- linux.21pre4/drivers/ide/ide-proc.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-proc.c 2003-01-31 17:41:41.000000000 +0000 @@ -488,6 +488,7 @@ char *out = page; int len, rc, mul_factor, div_factor; + down(&ide_setting_sem); out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n"); out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n"); while(setting) { @@ -507,6 +508,7 @@ setting = setting->next; } len = out - page; + up(&ide_setting_sem); PROC_IDE_READ_RETURN(page,start,off,count,eof,len); } @@ -575,12 +577,17 @@ --n; ++p; } + + down(&ide_setting_sem); setting = ide_find_setting_by_name(drive, name); if (!setting) + { + up(&ide_setting_sem); goto parse_error; - + } if (for_real) ide_write_setting(drive, setting, val * setting->div_factor / setting->mul_factor); + up(&ide_setting_sem); } } while (!for_real++); return count; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-tape.c linux.21pre4-ac2/drivers/ide/ide-tape.c --- linux.21pre4/drivers/ide/ide-tape.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-tape.c 2003-02-04 15:24:51.000000000 +0000 @@ -2450,13 +2450,10 @@ set_bit(PC_DMA_IN_PROGRESS, &pc->flags); #endif /* CONFIG_BLK_DEV_IDEDMA */ if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) { - if (HWGROUP(drive)->handler != NULL) - BUG(); - ide_set_handler(drive, + ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc, IDETAPE_WAIT_CMD, NULL); - HWIF(drive)->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG); return ide_started; } else { HWIF(drive)->OUTB(WIN_PACKETCMD, IDE_COMMAND_REG); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ide-taskfile.c linux.21pre4-ac2/drivers/ide/ide-taskfile.c --- linux.21pre4/drivers/ide/ide-taskfile.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ide-taskfile.c 2003-02-04 15:26:01.000000000 +0000 @@ -201,8 +201,7 @@ hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG); if (task->handler != NULL) { - ide_set_handler(drive, task->handler, WAIT_WORSTCASE, NULL); - hwif->OUTB(taskfile->command, IDE_COMMAND_REG); + ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL); if (task->prehandler != NULL) return task->prehandler(drive, task->rq); return ide_started; @@ -1929,9 +1928,8 @@ if (task->handler == NULL) return ide_stopped; - ide_set_handler(drive, task->handler, WAIT_WORSTCASE, NULL); /* Issue the command */ - hwif->OUTB(taskfile->command, IDE_COMMAND_REG); + ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL); if (task->prehandler != NULL) return task->prehandler(drive, HWGROUP(drive)->rq); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/ali14xx.c linux.21pre4-ac2/drivers/ide/legacy/ali14xx.c --- linux.21pre4/drivers/ide/legacy/ali14xx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/ali14xx.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/ali14xx.c Version 0.03 Feb 09, 1996 + * linux/drivers/ide/legacy/ali14xx.c Version 0.03 Feb 09, 1996 * * Copyright (C) 1996 Linus Torvalds & author (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/buddha.c linux.21pre4-ac2/drivers/ide/legacy/buddha.c --- 
linux.21pre4/drivers/ide/legacy/buddha.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/buddha.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/buddha.c -- Amiga Buddha, Catweasel and X-Surf IDE Driver + * linux/drivers/ide/legacy/buddha.c -- Amiga Buddha, Catweasel and X-Surf IDE Driver * * Copyright (C) 1997, 2001 by Geert Uytterhoeven and others * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/dtc2278.c linux.21pre4-ac2/drivers/ide/legacy/dtc2278.c --- linux.21pre4/drivers/ide/legacy/dtc2278.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/dtc2278.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/dtc2278.c Version 0.02 Feb 10, 1996 + * linux/drivers/ide/legacy/dtc2278.c Version 0.02 Feb 10, 1996 * * Copyright (C) 1996 Linus Torvalds & author (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/falconide.c linux.21pre4-ac2/drivers/ide/legacy/falconide.c --- linux.21pre4/drivers/ide/legacy/falconide.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/falconide.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/falconide.c -- Atari Falcon IDE Driver + * linux/drivers/ide/legacy/falconide.c -- Atari Falcon IDE Driver * * Created 12 Jul 1997 by Geert Uytterhoeven * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/gayle.c linux.21pre4-ac2/drivers/ide/legacy/gayle.c --- linux.21pre4/drivers/ide/legacy/gayle.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/gayle.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/gayle.c -- Amiga Gayle IDE Driver + * linux/drivers/ide/legacy/gayle.c -- Amiga Gayle IDE Driver * * Created 9 Jul 1997 by Geert Uytterhoeven * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/hd.c linux.21pre4-ac2/drivers/ide/legacy/hd.c --- linux.21pre4/drivers/ide/legacy/hd.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/hd.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/hd.c + * linux/drivers/ide/legacy/hd.c * * Copyright (C) 1991, 1992 Linus Torvalds */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/ht6560b.c linux.21pre4-ac2/drivers/ide/legacy/ht6560b.c --- linux.21pre4/drivers/ide/legacy/ht6560b.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/ht6560b.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/ht6560b.c Version 0.07 Feb 1, 2000 + * linux/drivers/ide/legacy/ht6560b.c Version 0.07 Feb 1, 2000 * * Copyright (C) 1995-2000 Linus Torvalds & author (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/ide-cs.c linux.21pre4-ac2/drivers/ide/legacy/ide-cs.c --- linux.21pre4/drivers/ide/legacy/ide-cs.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/ide-cs.c 2003-01-28 16:22:27.000000000 +0000 @@ -92,10 +92,11 @@ int ndev; dev_node_t node; int hd; + struct tq_struct rel_task; } ide_info_t; static void ide_config(dev_link_t *link); -static void ide_release(u_long arg); +static void ide_release(void *arg); static int ide_event(event_t event, int priority, event_callback_args_t *args); @@ -136,9 +137,8 @@ if (!info) return 
NULL; memset(info, 0, sizeof(*info)); link = &info->link; link->priv = info; + INIT_TQUEUE(&info->rel_task, ide_release, link); - link->release.function = &ide_release; - link->release.data = (u_long)link; link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->io.IOAddrLines = 3; @@ -187,6 +187,7 @@ static void ide_detach(dev_link_t *link) { dev_link_t **linkp; + ide_info_t *info = link->priv; int ret; DEBUG(0, "ide_detach(0x%p)\n", link); @@ -197,9 +198,10 @@ if (*linkp == NULL) return; - del_timer(&link->release); - if (link->state & DEV_CONFIG) - ide_release((u_long)link); + if (link->state & DEV_CONFIG) { + schedule_task(&info->rel_task); + flush_scheduled_tasks(); + } if (link->handle) { ret = CardServices(DeregisterClient, link->handle); @@ -209,7 +211,7 @@ /* Unlink, free device structure */ *linkp = link->next; - kfree(link->priv); + kfree(info); } /* ide_detach */ @@ -381,7 +383,7 @@ cs_failed: cs_error(link->handle, last_fn, last_ret); failed: - ide_release((u_long)link); + ide_release(link); } /* ide_config */ @@ -393,12 +395,15 @@ ======================================================================*/ -void ide_release(u_long arg) +static void ide_release(void *arg) { - dev_link_t *link = (dev_link_t *)arg; + dev_link_t *link = arg; ide_info_t *info = link->priv; - DEBUG(0, "ide_release(0x%p)\n", link); + if (!(link->state & DEV_CONFIG)) + return; + + DEBUG(0, "ide_do_release(0x%p)\n", link); if (info->ndev) { /* FIXME: if this fails we need to queue the cleanup somehow @@ -435,6 +440,7 @@ event_callback_args_t *args) { dev_link_t *link = args->client_data; + ide_info_t *info = link->priv; DEBUG(1, "ide_event(0x%06x)\n", event); @@ -442,7 +448,7 @@ case CS_EVENT_CARD_REMOVAL: link->state &= ~DEV_PRESENT; if (link->state & DEV_CONFIG) - mod_timer(&link->release, jiffies + HZ/20); + schedule_task(&info->rel_task); break; case CS_EVENT_CARD_INSERTION: link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/macide.c linux.21pre4-ac2/drivers/ide/legacy/macide.c --- linux.21pre4/drivers/ide/legacy/macide.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/macide.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/macide.c -- Macintosh IDE Driver + * linux/drivers/ide/legacy/macide.c -- Macintosh IDE Driver * * Copyright (C) 1998 by Michael Schmitz * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/pdc4030.c linux.21pre4-ac2/drivers/ide/legacy/pdc4030.c --- linux.21pre4/drivers/ide/legacy/pdc4030.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/pdc4030.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* -*- linux-c -*- - * linux/drivers/ide/pdc4030.c Version 0.90 May 27, 1999 + * linux/drivers/ide/legacy/pdc4030.c Version 0.90 May 27, 1999 * * Copyright (C) 1995-2002 Linus Torvalds & authors (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/pdc4030.h linux.21pre4-ac2/drivers/ide/legacy/pdc4030.h --- linux.21pre4/drivers/ide/legacy/pdc4030.h 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/pdc4030.h 2003-02-01 16:51:25.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/pdc4030.h + * linux/drivers/ide/legacy/pdc4030.h * * Copyright (C) 1995-1998 Linus Torvalds & authors */ diff -u --exclude-from /usr/src/exclude --new-file --recursive 
linux.21pre4/drivers/ide/legacy/q40ide.c linux.21pre4-ac2/drivers/ide/legacy/q40ide.c --- linux.21pre4/drivers/ide/legacy/q40ide.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/q40ide.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/q40ide.c -- Q40 I/O port IDE Driver + * linux/drivers/ide/legacy/q40ide.c -- Q40 I/O port IDE Driver * * (c) Richard Zidlicky * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/qd65xx.c linux.21pre4-ac2/drivers/ide/legacy/qd65xx.c --- linux.21pre4/drivers/ide/legacy/qd65xx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/qd65xx.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/qd65xx.c Version 0.07 Sep 30, 2001 + * linux/drivers/ide/legacy/qd65xx.c Version 0.07 Sep 30, 2001 * * Copyright (C) 1996-2001 Linus Torvalds & author (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/qd65xx.h linux.21pre4-ac2/drivers/ide/legacy/qd65xx.h --- linux.21pre4/drivers/ide/legacy/qd65xx.h 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/qd65xx.h 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/qd65xx.h + * linux/drivers/ide/legacy/qd65xx.h * * Copyright (c) 2000 Linus Torvalds & authors */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/legacy/umc8672.c linux.21pre4-ac2/drivers/ide/legacy/umc8672.c --- linux.21pre4/drivers/ide/legacy/umc8672.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/legacy/umc8672.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/umc8672.c Version 0.05 Jul 31, 1996 + * linux/drivers/ide/legacy/umc8672.c Version 0.05 Jul 31, 1996 * * Copyright (C) 1995-1996 Linus Torvalds & author (see below) */ @@ -137,7 +137,7 @@ unsigned long flags; local_irq_save(flags); - if (check_region(0x108, 2)) { + if (!request_region(0x108, 2, "umc8672")) { local_irq_restore(flags); printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n"); return 1; @@ -146,6 +146,7 @@ if (in_umc (0xd5) != 0xa0) { local_irq_restore(flags); printk(KERN_ERR "umc8672: not found\n"); + release_region(0x108, 2); return 1; } outb_p(0xa5,0x108); /* disable umc */ @@ -153,7 +154,6 @@ umc_set_speeds (current_speeds); local_irq_restore(flags); - request_region(0x108, 2, "umc8672"); ide_hwifs[0].chipset = ide_umc8672; ide_hwifs[1].chipset = ide_umc8672; ide_hwifs[0].tuneproc = &tune_umc; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/adma100.c linux.21pre4-ac2/drivers/ide/pci/adma100.c --- linux.21pre4/drivers/ide/pci/adma100.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/adma100.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/adma100.c -- basic support for Pacific Digital ADMA-100 boards + * linux/drivers/ide/pci/adma100.c -- basic support for Pacific Digital ADMA-100 boards * * Created 09 Apr 2002 by Mark Lord * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/aec62xx.c linux.21pre4-ac2/drivers/ide/pci/aec62xx.c --- linux.21pre4/drivers/ide/pci/aec62xx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/aec62xx.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/aec62xx.c Version 0.11 March 27, 2002 + * linux/drivers/ide/pci/aec62xx.c 
Version 0.11 March 27, 2002 * * Copyright (C) 1999-2002 Andre Hedrick * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/alim15x3.c linux.21pre4-ac2/drivers/ide/pci/alim15x3.c --- linux.21pre4/drivers/ide/pci/alim15x3.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/alim15x3.c 2003-01-28 16:28:24.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/alim15x3.c Version 0.16 2003/01/02 + * linux/drivers/ide/pci/alim15x3.c Version 0.16 2003/01/02 * * Copyright (C) 1998-2000 Michel Aubry, Maintainer * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer @@ -19,6 +19,10 @@ * Don't use LBA48 mode on ALi <= 0xC4 * Don't poke 0x79 with a non ALi northbridge * Don't flip undefined bits on newer chipsets (fix Fujitsu laptop hang) + * + * Documentation + * Chipset documentation available under NDA only + * */ #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/amd74xx.c linux.21pre4-ac2/drivers/ide/pci/amd74xx.c --- linux.21pre4/drivers/ide/pci/amd74xx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/amd74xx.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,9 +1,30 @@ /* - * linux/drivers/ide/amd74xx.c Version 0.05 June 9, 2000 + * linux/drivers/ide/pci/amd74xx.c Version 0.05 June 9, 2000 * * Copyright (C) 1999-2000 Andre Hedrick * May be copied or modified under the terms of the GNU General Public License * + * Documentation + * Publically available from AMD + * + * Errata + * AMD 756: Errata #1 + * Single word DMA is not supported by old AMD IDE devices + * + * AMD 766: Errata #15 + * IDE may hang if prefetch is enabled. The BIOS does not enable + * prefetch. + * + * AMD 766: Errata #19 + * Poor UDMA100 performance + * + * AMD 761: Errata #55 + * AMD 762: Errata #56 + * A problem exists with IDE prefetch and the last page before the + * 640K boundary. Users hitting this problem can add a PS/2 mouse + * to the system, causing that page to become an EBDA page and not + * used by the OS. A full work around is under investigation. + * BIOS updates should also avoid this. */ #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/cmd640.c linux.21pre4-ac2/drivers/ide/pci/cmd640.c --- linux.21pre4/drivers/ide/pci/cmd640.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/cmd640.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/cmd640.c Version 1.02 Sep 01, 1996 + * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996 * * Copyright (C) 1995-1996 Linus Torvalds & authors (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/cmd64x.c linux.21pre4-ac2/drivers/ide/pci/cmd64x.c --- linux.21pre4/drivers/ide/pci/cmd64x.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/cmd64x.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,6 +1,6 @@ /* $Id: cmd64x.c,v 1.21 2000/01/30 23:23:16 * - * linux/drivers/ide/cmd64x.c Version 1.30 Sept 10, 2002 + * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002 * * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. 
* Note, this driver is not used at all on other systems because diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/cs5530.c linux.21pre4-ac2/drivers/ide/pci/cs5530.c --- linux.21pre4/drivers/ide/pci/cs5530.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/cs5530.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/cs5530.c Version 0.7 Sept 10, 2002 + * linux/drivers/ide/pci/cs5530.c Version 0.7 Sept 10, 2002 * * Copyright (C) 2000 Andre Hedrick * Ditto of GNU General Public License. @@ -9,6 +9,9 @@ * * Development of this chipset driver was funded * by the nice folks at National Semiconductor. + * + * Documentation: + * CS5530 documentation available from National Semiconductor. */ #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/cy82c693.c linux.21pre4-ac2/drivers/ide/pci/cy82c693.c --- linux.21pre4/drivers/ide/pci/cy82c693.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/cy82c693.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/cy82c693.c Version 0.40 Sep. 10, 2002 + * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002 * * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer * Copyright (C) 1998-2002 Andre Hedrick , Integrater diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/generic.c linux.21pre4-ac2/drivers/ide/pci/generic.c --- linux.21pre4/drivers/ide/pci/generic.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/generic.c 2003-01-28 16:28:37.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/generic.c Version 0.11 December 30, 2002 + * linux/drivers/ide/pci/generic.c Version 0.11 December 30, 2002 * * Copyright (C) 2001-2002 Andre Hedrick * Portions (C) Copyright 2002 Red Hat Inc diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/hpt34x.c linux.21pre4-ac2/drivers/ide/pci/hpt34x.c --- linux.21pre4/drivers/ide/pci/hpt34x.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/hpt34x.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/hpt34x.c Version 0.40 Sept 10, 2002 + * linux/drivers/ide/pci/hpt34x.c Version 0.40 Sept 10, 2002 * * Copyright (C) 1998-2000 Andre Hedrick * May be copied or modified under the terms of the GNU General Public License diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/hpt366.c linux.21pre4-ac2/drivers/ide/pci/hpt366.c --- linux.21pre4/drivers/ide/pci/hpt366.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/hpt366.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/hpt366.c Version 0.34 Sept 17, 2002 + * linux/drivers/ide/pci/hpt366.c Version 0.34 Sept 17, 2002 * * Copyright (C) 1999-2002 Andre Hedrick * Portions Copyright (C) 2001 Sun Microsystems, Inc. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/ns87415.c linux.21pre4-ac2/drivers/ide/pci/ns87415.c --- linux.21pre4/drivers/ide/pci/ns87415.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/ns87415.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/ns87415.c Version 2.00 Sep. 10, 2002 + * linux/drivers/ide/pci/ns87415.c Version 2.00 Sep. 10, 2002 * * Copyright (C) 1997-1998 Mark Lord * Copyright (C) 1998 Eddie C. 
Dost diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/nvidia.c linux.21pre4-ac2/drivers/ide/pci/nvidia.c --- linux.21pre4/drivers/ide/pci/nvidia.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/nvidia.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/nvidia.c Version 0.01 + * linux/drivers/ide/pci/nvidia.c Version 0.01 * * Copyright (C) 2002-2002 Andre Hedrick * May be copied or modified under the terms of the GNU General Public License diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/opti621.c linux.21pre4-ac2/drivers/ide/pci/opti621.c --- linux.21pre4/drivers/ide/pci/opti621.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/opti621.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/opti621.c Version 0.7 Sept 10, 2002 + * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002 * * Copyright (C) 1996-1998 Linus Torvalds & authors (see below) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/pdc202xx_new.c linux.21pre4-ac2/drivers/ide/pci/pdc202xx_new.c --- linux.21pre4/drivers/ide/pci/pdc202xx_new.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/pdc202xx_new.c 2003-01-30 17:56:23.000000000 +0000 @@ -224,7 +224,7 @@ decode_registers(REG_D, DP); #endif /* PDC202XX_DECODE_REGISTER_INFO */ #if PDC202XX_DEBUG_DRIVE_INFO - printk("%s: %s drive%d 0x%08x ", + printk(KERN_DEBUG "%s: %s drive%d 0x%08x ", drive->name, ide_xfer_verbose(speed), drive->dn, drive_conf); pci_read_config_dword(dev, drive_pci, &drive_conf); @@ -321,7 +321,7 @@ case PCI_DEVICE_ID_PROMISE_20268: cable = pdcnew_new_cable_detect(hwif); #if PDC202_DEBUG_CABLE - printk("%s: %s-pin cable, %s-pin cable, %d\n", + printk(KERN_DEBUG "%s: %s-pin cable, %s-pin cable, %d\n", hwif->name, hwif->udma_four ? "80" : "40", cable ? "40" : "80", cable); #endif /* PDC202_DEBUG_CABLE */ @@ -347,15 +347,15 @@ if ((ultra_66) && (cable)) { #ifdef DEBUG - printk("ULTRA 66/100/133: %s channel of Ultra 66/100/133 " + printk(KERN_DEBUG "ULTRA 66/100/133: %s channel of Ultra 66/100/133 " "requires an 80-pin cable for Ultra66 operation.\n", hwif->channel ? "Secondary" : "Primary"); - printk(" Switching to Ultra33 mode.\n"); + printk(KERN_DEBUG " Switching to Ultra33 mode.\n"); #endif /* DEBUG */ /* Primary : zero out second bit */ /* Secondary : zero out fourth bit */ - printk("Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary"); - printk("%s reduced to Ultra33 mode.\n", drive->name); + printk(KERN_WARNING "Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary"); + printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name); } if (drive->media != ide_disk) @@ -444,7 +444,7 @@ /* * Deleted this because it is redundant from the caller. */ - printk("PDC202XX: %s channel reset.\n", + printk(KERN_WARNING "PDC202XX: %s channel reset.\n", HWIF(drive)->channel ? "Secondary" : "Primary"); } @@ -459,7 +459,7 @@ hwif->OUTB((udma_speed_flag & ~0x10), (high_16|0x001f)); mdelay(2000); /* 2 seconds ?! */ - printk("PDC202XX: %s channel reset.\n", + printk(KERN_WARNING "PDC202XX: %s channel reset.\n", hwif->channel ? 
"Secondary" : "Primary"); } @@ -513,7 +513,7 @@ if (dev->resource[PCI_ROM_RESOURCE].start) { pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); - printk("%s: ROM enabled at 0x%08lx\n", + printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start); } @@ -555,7 +555,7 @@ hwif->autodma = 1; hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; #if PDC202_DEBUG_CABLE - printk("%s: %s-pin cable\n", + printk(KERN_DEBUG "%s: %s-pin cable\n", hwif->name, hwif->udma_four ? "80" : "40"); #endif /* PDC202_DEBUG_CABLE */ } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/pdc202xx_old.c linux.21pre4-ac2/drivers/ide/pci/pdc202xx_old.c --- linux.21pre4/drivers/ide/pci/pdc202xx_old.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/pdc202xx_old.c 2003-01-30 17:57:50.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/pdc202xx.c Version 0.36 Sept 11, 2002 + * linux/drivers/ide/pci/pdc202xx_old.c Version 0.36 Sept 11, 2002 * * Copyright (C) 1998-2002 Andre Hedrick * @@ -323,7 +323,7 @@ decode_registers(REG_D, DP); #endif /* PDC202XX_DECODE_REGISTER_INFO */ #if PDC202XX_DEBUG_DRIVE_INFO - printk("%s: %s drive%d 0x%08x ", + printk(KERN_DEBUG "%s: %s drive%d 0x%08x ", drive->name, ide_xfer_verbose(speed), drive->dn, drive_conf); pci_read_config_dword(dev, drive_pci, &drive_conf); @@ -379,7 +379,7 @@ case PCI_DEVICE_ID_PROMISE_20262: cable = pdc202xx_old_cable_detect(hwif); #if PDC202_DEBUG_CABLE - printk("%s: %s-pin cable, %s-pin cable, %d\n", + printk(KERN_DEBUG "%s: %s-pin cable, %s-pin cable, %d\n", hwif->name, hwif->udma_four ? "80" : "40", cable ? "40" : "80", cable); #endif /* PDC202_DEBUG_CABLE */ @@ -408,16 +408,16 @@ if ((ultra_66) && (cable)) { #ifdef DEBUG - printk("ULTRA 66/100/133: %s channel of Ultra 66/100/133 " + printk(KERN_DEBUG "ULTRA 66/100/133: %s channel of Ultra 66/100/133 " "requires an 80-pin cable for Ultra66 operation.\n", hwif->channel ? "Secondary" : "Primary"); - printk(" Switching to Ultra33 mode.\n"); + printk(KERN_DEBUG " Switching to Ultra33 mode.\n"); #endif /* DEBUG */ /* Primary : zero out second bit */ /* Secondary : zero out fourth bit */ hwif->OUTB(CLKSPD & ~mask, (hwif->dma_master + 0x11)); - printk("Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary"); - printk("%s reduced to Ultra33 mode.\n", drive->name); + printk(KERN_WARNING "Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary"); + printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name); } else { if (ultra_66) { /* @@ -620,7 +620,7 @@ hwif->OUTB((udma_speed_flag & ~0x10), (high_16|0x001f)); mdelay(2000); /* 2 seconds ?! */ - printk("PDC202XX: %s channel reset.\n", + printk(KERN_WARNING "PDC202XX: %s channel reset.\n", hwif->channel ? "Secondary" : "Primary"); } @@ -699,7 +699,7 @@ if (dev->resource[PCI_ROM_RESOURCE].start) { pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); - printk("%s: ROM enabled at 0x%08lx\n", + printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start); } @@ -780,7 +780,7 @@ hwif->autodma = 1; hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; #if PDC202_DEBUG_CABLE - printk("%s: %s-pin cable\n", + printk(KERN_DEBUG "%s: %s-pin cable\n", hwif->name, hwif->udma_four ? 
"80" : "40"); #endif /* PDC202_DEBUG_CABLE */ } @@ -797,7 +797,7 @@ udma_speed_flag = hwif->INB((dmabase|0x1f)); primary_mode = hwif->INB((dmabase|0x1a)); secondary_mode = hwif->INB((dmabase|0x1b)); - printk("%s: (U)DMA Burst Bit %sABLED " \ + printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \ "Primary %s Mode " \ "Secondary %s Mode.\n", hwif->cds->name, (udma_speed_flag & 1) ? "EN" : "DIS", @@ -806,7 +806,7 @@ #ifdef CONFIG_PDC202XX_BURST if (!(udma_speed_flag & 1)) { - printk("%s: FORCING BURST BIT 0x%02x->0x%02x ", + printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ", hwif->cds->name, udma_speed_flag, (udma_speed_flag|1)); hwif->OUTB(udma_speed_flag|1,(dmabase|0x1f)); @@ -816,7 +816,7 @@ #endif /* CONFIG_PDC202XX_BURST */ #ifdef CONFIG_PDC202XX_MASTER if (!(primary_mode & 1)) { - printk("%s: FORCING PRIMARY MODE BIT " + printk(KERN_INFO "%s: FORCING PRIMARY MODE BIT " "0x%02x -> 0x%02x ", hwif->cds->name, primary_mode, (primary_mode|1)); hwif->OUTB(primary_mode|1, (dmabase|0x1a)); @@ -825,7 +825,7 @@ } if (!(secondary_mode & 1)) { - printk("%s: FORCING SECONDARY MODE BIT " + printk(KERN_INFO "%s: FORCING SECONDARY MODE BIT " "0x%02x -> 0x%02x ", hwif->cds->name, secondary_mode, (secondary_mode|1)); hwif->OUTB(secondary_mode|1, (dmabase|0x1b)); @@ -850,7 +850,7 @@ if (irq != irq2) { pci_write_config_byte(dev, (PCI_INTERRUPT_LINE)|0x80, irq); /* 0xbc */ - printk("%s: pci-config space interrupt " + printk(KERN_INFO "%s: pci-config space interrupt " "mirror fixed.\n", d->name); } } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/pdcadma.c linux.21pre4-ac2/drivers/ide/pci/pdcadma.c --- linux.21pre4/drivers/ide/pci/pdcadma.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/pdcadma.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/pdcadma.c Version 0.05 Sept 10, 2002 + * linux/drivers/ide/pci/pdcadma.c Version 0.05 Sept 10, 2002 * * Copyright (C) 1999-2000 Andre Hedrick * May be copied or modified under the terms of the GNU General Public License diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/piix.c linux.21pre4-ac2/drivers/ide/pci/piix.c --- linux.21pre4/drivers/ide/pci/piix.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/piix.c 2003-01-28 16:28:47.000000000 +0000 @@ -1,8 +1,10 @@ /* - * linux/drivers/ide/piix.c Version 0.40 August 18, 2002 + * linux/drivers/ide/pci/piix.c Version 0.42 January 11, 2003 * * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick + * Copyright (C) 2003 Red Hat Inc + * * May be copied or modified under the terms of the GNU General Public License * * PIO mode setting function for Intel chipsets. @@ -51,6 +53,41 @@ * pci_read_config_word(HWIF(drive)->pci_dev, 0x4a, ®4a); * pci_read_config_word(HWIF(drive)->pci_dev, 0x54, ®54); * + * Documentation + * Publically available from Intel web site. Errata documentation + * is also publically available. As an aide to anyone hacking on this + * driver the list of errata that are relevant is below.going back to + * PIIX4. Older device documentation is now a bit tricky to find. 
+ * + * Errata of note: + * + * Unfixable + * PIIX4 errata #9 - Only on ultra obscure hw + * ICH3 errata #13 - Not observed to affect real hw + * by Intel + * + * Things we must deal with + * PIIX4 errata #10 - BM IDE hang with non UDMA + * (must stop/start dma to recover) + * 440MX errata #15 - As PIIX4 errata #10 + * PIIX4 errata #15 - Must not read control registers + * during a PIO transfer + * 440MX errata #13 - As PIIX4 errata #15 + * ICH2 errata #21 - DMA mode 0 doesn't work right + * ICH0/1 errata #55 - As ICH2 errata #21 + * ICH2 spec c #9 - Extra operations needed to handle + * drive hotswap [NOT YET SUPPORTED] + * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary + * and must be dword aligned + * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 + * + * Should have been BIOS fixed: + * 450NX: errata #19 - DMA hangs on old 450NX + * 450NX: errata #20 - DMA hangs on old 450NX + * 450NX: errata #25 - Corruption with DMA on old 450NX + * ICH3 errata #15 - IDE deadlock under high load + * (BIOS must set dev 31 fn 0 bit 23) + * ICH3 errata #18 - Don't use native mode */ #include @@ -77,6 +114,7 @@ #define PIIX_MAX_DEVS 5 static struct pci_dev *piix_devs[PIIX_MAX_DEVS]; static int n_piix_devs; +static int no_piix_dma = 0; /** * piix_get_info - fill in /proc for PIIX ide @@ -443,6 +481,33 @@ } /** + * piix_faulty_dma0 - check for DMA0 errata + * @hwif: IDE interface to check + * + * If an ICH/ICH0/ICH2 interface is operating in multi-word + * DMA mode with 600nS cycle time the IDE PIO prefetch buffer will + * inadvertently provide an extra piece of secondary data to the primary + * device resulting in data corruption. + * + * With such a device this test function returns true. This allows + * our tuning code to follow Intel recommendations and use PIO on + * such devices. + */ + +static int piix_faulty_dma0(ide_hwif_t *hwif) +{ + switch(hwif->pci_dev->device) + { + case PCI_DEVICE_ID_INTEL_82801AA_1: /* ICH */ + case PCI_DEVICE_ID_INTEL_82801AB_1: /* ICH0 */ + case PCI_DEVICE_ID_INTEL_82801BA_8: /* ICH2 */ + case PCI_DEVICE_ID_INTEL_82801BA_9: /* ICH2 */ + return 1; + } + return 0; +} + +/** * piix_config_drive_for_dma - configure drive for DMA + * @drive: IDE drive to configure * @@ -454,8 +519,15 @@ static int piix_config_drive_for_dma (ide_drive_t *drive) { u8 speed = ide_dma_speed(drive, piix_ratemask(drive)); - - if (!(speed)) { + + /* Some ICH devices cannot support DMA mode 0 */ + if(speed == XFER_MW_DMA_0 && piix_faulty_dma0(HWIF(drive))) + speed = 0; + + /* If no DMA speed was available or the chipset has DMA bugs + then disable DMA and use PIO */ + + if (!speed || no_piix_dma) { u8 tspeed = ide_get_best_pio_mode(drive, 255, 5, NULL); speed = piix_dma_2_pio(XFER_PIO_0 + tspeed); } @@ -675,6 +747,37 @@ return 0; } +/** + * piix_check_450nx - Check for problem 450NX setup + * + * Check for the presence of 450NX errata #19 and errata #25. If + * they are found, disable use of IDE DMA + */ + +static void __init piix_check_450nx(void) +{ + struct pci_dev *pdev = NULL; + u16 cfg; + u8 rev; + while((pdev=pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL) + { + /* Look for 450NX PXB.
Check for problem configurations + A PCI quirk checks bit 6 already */ + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + pci_read_config_word(pdev, 0x41, &cfg); + /* Only on the original revision: IDE DMA can hang */ + if(rev == 0x00) + no_piix_dma = 1; + /* On all revisions PXB bus lock must be disabled for IDE */ + else if(cfg & (1<<14)) + no_piix_dma = 2; + } + if(no_piix_dma) + printk(KERN_WARNING "piix: 450NX errata present, disabling IDE DMA.\n"); + if(no_piix_dma == 2) + printk(KERN_WARNING "piix: A BIOS update may resolve this.\n"); +} + static struct pci_device_id piix_pci_tbl[] __devinitdata = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, @@ -703,6 +806,7 @@ static int piix_ide_init(void) { + piix_check_450nx(); return ide_pci_register_driver(&driver); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/rz1000.c linux.21pre4-ac2/drivers/ide/pci/rz1000.c --- linux.21pre4/drivers/ide/pci/rz1000.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/rz1000.c 2003-01-28 16:28:58.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/rz1000.c Version 0.05 December 8, 1997 + * linux/drivers/ide/pci/rz1000.c Version 0.06 January 12, 2003 * * Copyright (C) 1995-1998 Linus Torvalds & author (see below) */ @@ -43,13 +43,13 @@ hwif->chipset = ide_rz1000; if (!pci_read_config_word (dev, 0x40, &reg) && !pci_write_config_word(dev, 0x40, reg & 0xdfff)) { - printk("%s: disabled chipset read-ahead " + printk(KERN_INFO "%s: disabled chipset read-ahead " "(buggy RZ1000/RZ1001)\n", hwif->name); } else { hwif->serialized = 1; hwif->drives[0].no_unmask = 1; hwif->drives[1].no_unmask = 1; - printk("%s: serialized, disabled unmasking " + printk(KERN_INFO "%s: serialized, disabled unmasking " "(buggy RZ1000/RZ1001)\n", hwif->name); } } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/sc1200.c linux.21pre4-ac2/drivers/ide/pci/sc1200.c --- linux.21pre4/drivers/ide/pci/sc1200.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/sc1200.c 2003-01-28 16:29:27.000000000 +0000 @@ -1,11 +1,14 @@ /* - * linux/drivers/ide/sc1200.c Version 0.9 24-Oct-2002 + * linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003 * * Copyright (C) 2000-2002 Mark Lord * May be copied or modified under the terms of the GNU General Public License * * Development of this chipset driver was funded * by the nice folks at National Semiconductor.
+ * + * Documentation: + * Available from National Semiconductor */ #include @@ -427,8 +430,6 @@ static int sc1200_suspend (struct pci_dev *dev, u32 state) { - ide_hwif_t *hwif = NULL; - printk("SC1200: suspend(%u)\n", state); /* You don't need to iterate over disks -- sysfs should have done that for you already */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/serverworks.c linux.21pre4-ac2/drivers/ide/pci/serverworks.c --- linux.21pre4/drivers/ide/pci/serverworks.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/serverworks.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/serverworks.c Version 0.7 10 Sept 2002 + * linux/drivers/ide/pci/serverworks.c Version 0.7 10 Sept 2002 * * Copyright (C) 1998-2000 Michel Aubry * Copyright (C) 1998-2000 Andrzej Krzysztofowicz @@ -21,6 +21,9 @@ * * CSB6: `Champion South Bridge' IDE Interface (optional: third channel) * + * Documentation: + * Available under NDA only. Errata info very hard to get. + * */ #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/siimage.c linux.21pre4-ac2/drivers/ide/pci/siimage.c --- linux.21pre4/drivers/ide/pci/siimage.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/siimage.c 2003-01-30 18:18:25.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/siimage.c Version 1.01 Sept 11, 2002 + * linux/drivers/ide/pci/siimage.c Version 1.02 Jan 30, 2003 * * Copyright (C) 2001-2002 Andre Hedrick */ @@ -248,9 +248,9 @@ { u8 speed = ide_dma_speed(drive, siimage_ratemask(drive)); - config_chipset_for_pio(drive, (!(speed))); + config_chipset_for_pio(drive, !speed); - if ((!(speed))) + if (!speed) return 0; if (ide_set_xfer_rate(drive, speed)) @@ -267,7 +267,7 @@ ide_hwif_t *hwif = HWIF(drive); struct hd_driveid *id = drive->id; - if ((id != NULL) && ((id->capability & 1) != 0) && drive->autodma) { + if (id != NULL && (id->capability & 1) != 0 && drive->autodma) { if (!(hwif->atapi_dma)) goto fast_ata_pio; /* Consult the list of known "bad" drives */ @@ -317,10 +317,9 @@ return 1; /* return 1 if Device INTR asserted */ - if ((pci_read_config_byte(hwif->pci_dev, SELREG(1), &dma_altstat)), - ((dma_altstat & 8) == 8)) + pci_read_config_byte(hwif->pci_dev, SELREG(1), &dma_altstat); + if (dma_altstat & 8) return 0; //return 1; - return 0; } @@ -355,7 +354,7 @@ hwif->OUTL(sata_error, SATA_ERROR_REG); watchdog = (sata_error & 0x00680000) ? 
1 : 0; #if 1 - printk("%s: sata_error = 0x%08x, " + printk(KERN_WARNING "%s: sata_error = 0x%08x, " "watchdog = %d, %s\n", drive->name, sata_error, watchdog, __FUNCTION__); @@ -426,7 +425,7 @@ ide_hwif_t *hwif = HWIF(drive); if ((hwif->INL(SATA_STATUS_REG) & 0x03) != 0x03) { - printk("%s: reset phy dead, status=0x%08x\n", + printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", hwif->name, hwif->INL(SATA_STATUS_REG)); HWGROUP(drive)->poll_timeout = 0; #if 0 @@ -475,10 +474,10 @@ if (SATA_STATUS_REG) { u32 sata_stat = hwif->INL(SATA_STATUS_REG); - printk("%s: reset phy, status=0x%08x, %s\n", + printk(KERN_WARNING "%s: reset phy, status=0x%08x, %s\n", hwif->name, sata_stat, __FUNCTION__); if (!(sata_stat)) { - printk("%s: reset phy dead, status=0x%08x\n", + printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n", hwif->name, sata_stat); drive->failures++; } @@ -491,7 +490,7 @@ if (dev->device == PCI_DEVICE_ID_SII_3112) goto sata_skip; - printk("%s: BASE CLOCK ", name); + printk(KERN_INFO "%s: BASE CLOCK ", name); clocking &= ~0x0C; switch(clocking) { case 0x03: printk("DISABLED !\n"); break; @@ -514,7 +513,6 @@ #endif /* DISPLAY_SIIMAGE_TIMINGS && CONFIG_PROC_FS */ } -#ifdef CONFIG_TRY_MMIO_SIIMAGE static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name) { unsigned long bar5 = pci_resource_start(dev, 5); @@ -533,78 +531,73 @@ addr = (unsigned long) ioaddr; if (dev->device == PCI_DEVICE_ID_SII_3112) { - sii_outl(0, DEVADDR(0x148)); - sii_outl(0, DEVADDR(0x1C8)); + writel(0, DEVADDR(0x148)); + writel(0, DEVADDR(0x1C8)); } - sii_outb(0, DEVADDR(0xB4)); - sii_outb(0, DEVADDR(0xF4)); - tmpbyte = sii_inb(DEVADDR(0x4A)); + writeb(0, DEVADDR(0xB4)); + writeb(0, DEVADDR(0xF4)); + tmpbyte = readb(DEVADDR(0x4A)); switch(tmpbyte) { case 0x01: - sii_outb(tmpbyte|0x10, DEVADDR(0x4A)); - tmpbyte = sii_inb(DEVADDR(0x4A)); + writeb(tmpbyte|0x10, DEVADDR(0x4A)); + tmpbyte = readb(DEVADDR(0x4A)); case 0x31: /* if clocking is disabled */ /* 133 clock attempt to force it on */ - sii_outb(tmpbyte & ~0x20, DEVADDR(0x4A)); - tmpbyte = sii_inb(DEVADDR(0x4A)); + writeb(tmpbyte & ~0x20, DEVADDR(0x4A)); + tmpbyte = readb(DEVADDR(0x4A)); case 0x11: case 0x21: break; default: tmpbyte &= ~0x30; tmpbyte |= 0x20; - sii_outb(tmpbyte, DEVADDR(0x4A)); + writeb(tmpbyte, DEVADDR(0x4A)); break; } - sii_outb(0x72, DEVADDR(0xA1)); - sii_outw(0x328A, DEVADDR(0xA2)); - sii_outl(0x62DD62DD, DEVADDR(0xA4)); - sii_outl(0x43924392, DEVADDR(0xA8)); - sii_outl(0x40094009, DEVADDR(0xAC)); - sii_outb(0x72, DEVADDR(0xE1)); - sii_outw(0x328A, DEVADDR(0xE2)); - sii_outl(0x62DD62DD, DEVADDR(0xE4)); - sii_outl(0x43924392, DEVADDR(0xE8)); - sii_outl(0x40094009, DEVADDR(0xEC)); + writeb(0x72, DEVADDR(0xA1)); + writew(0x328A, DEVADDR(0xA2)); + writel(0x62DD62DD, DEVADDR(0xA4)); + writel(0x43924392, DEVADDR(0xA8)); + writel(0x40094009, DEVADDR(0xAC)); + writeb(0x72, DEVADDR(0xE1)); + writew(0x328A, DEVADDR(0xE2)); + writel(0x62DD62DD, DEVADDR(0xE4)); + writel(0x43924392, DEVADDR(0xE8)); + writel(0x40094009, DEVADDR(0xEC)); if (dev->device == PCI_DEVICE_ID_SII_3112) { - sii_outl(0xFFFF0000, DEVADDR(0x108)); - sii_outl(0xFFFF0000, DEVADDR(0x188)); - sii_outl(0x00680000, DEVADDR(0x148)); - sii_outl(0x00680000, DEVADDR(0x1C8)); + writel(0xFFFF0000, DEVADDR(0x108)); + writel(0xFFFF0000, DEVADDR(0x188)); + writel(0x00680000, DEVADDR(0x148)); + writel(0x00680000, DEVADDR(0x1C8)); } - tmpbyte = sii_inb(DEVADDR(0x4A)); + tmpbyte = readb(DEVADDR(0x4A)); proc_reports_siimage(dev, (tmpbyte>>=4), name); return 1; } -#endif /* 
CONFIG_TRY_MMIO_SIIMAGE */ static unsigned int __init init_chipset_siimage (struct pci_dev *dev, const char *name) { u32 class_rev = 0; u8 tmpbyte = 0; -#ifdef CONFIG_TRY_MMIO_SIIMAGE u8 BA5_EN = 0; -#endif /* CONFIG_TRY_MMIO_SIIMAGE */ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); class_rev &= 0xff; pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255); -#ifdef CONFIG_TRY_MMIO_SIIMAGE pci_read_config_byte(dev, 0x8A, &BA5_EN); if ((BA5_EN & 0x01) || (pci_resource_start(dev, 5))) { if (setup_mmio_siimage(dev, name)) { return 0; } } -#endif /* CONFIG_TRY_MMIO_SIIMAGE */ pci_write_config_byte(dev, 0x80, 0x00); pci_write_config_byte(dev, 0x84, 0x00); @@ -658,17 +651,7 @@ // u16 i = 0; hw_regs_t hw; - hwif->OUTB = sii_outb; - hwif->OUTW = sii_outw; - hwif->OUTL = sii_outl; - hwif->OUTSW = sii_outsw; - hwif->OUTSL = sii_outsl; - hwif->INB = sii_inb; - hwif->INW = sii_inw; - hwif->INL = sii_inl; - hwif->INSW = sii_insw; - hwif->INSL = sii_insl; - + default_hwif_mmiops(hwif); memset(&hw, 0, sizeof(hw_regs_t)); #if 1 @@ -706,7 +689,7 @@ #endif #if 0 - printk("%s: ", hwif->name); + printk(KERN_DEBUG "%s: ", hwif->name); for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) printk("0x%08x ", DEVADDR((ch) ? 0xC0 : 0x80)|(i)); printk("0x%08x ", DEVADDR((ch) ? 0xCA : 0x8A)|(i)); @@ -726,7 +709,6 @@ hw.priv = (void *) addr; // hw.priv = pci_get_drvdata(hwif->pci_dev); hw.irq = hwif->pci_dev->irq; -// hw.iops = siimage_iops; memcpy(&hwif->hw, &hw, sizeof(hw)); memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->hw.io_ports)); @@ -777,9 +759,6 @@ pci_read_config_byte(hwif->pci_dev, SELREG(0), &ata66); return (ata66 & 0x01) ? 1 : 0; } -#ifndef CONFIG_TRY_MMIO_SIIMAGE - if (hwif->mmio) BUG(); -#endif /* CONFIG_TRY_MMIO_SIIMAGE */ return (hwif->INB(SELADDR(0)) & 0x01) ? 
1 : 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/siimage.h linux.21pre4-ac2/drivers/ide/pci/siimage.h --- linux.21pre4/drivers/ide/pci/siimage.h 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/siimage.h 2003-02-04 15:54:23.000000000 +0000 @@ -9,8 +9,6 @@ #define DISPLAY_SIIMAGE_TIMINGS -#define CONFIG_TRY_MMIO_SIIMAGE -//#undef CONFIG_TRY_MMIO_SIIMAGE #undef SIIMAGE_VIRTUAL_DMAPIO #undef SIIMAGE_BUFFERED_TASKFILE #undef SIIMAGE_LARGE_DMA @@ -36,56 +34,6 @@ #define DEVADDR(R) (((unsigned long) pci_get_drvdata(dev))|(R)) -inline u8 sii_inb (unsigned long port) -{ - return (u8) readb(port); -} - -inline u16 sii_inw (unsigned long port) -{ - return (u16) readw(port); -} - -inline void sii_insw (unsigned long port, void *addr, u32 count) -{ - __ide_mm_insw(port, addr, count); -} - -inline u32 sii_inl (unsigned long port) -{ - return (u32) readl(port); -} - -inline void sii_insl (unsigned long port, void *addr, u32 count) -{ - __ide_mm_insw(port, addr, (count)<<1); -} - -inline void sii_outb (u8 value, unsigned long port) -{ - writeb(value, port); -} - -inline void sii_outw (u16 value, unsigned long port) -{ - writew(value, port); -} - -inline void sii_outsw (unsigned long port, void *addr, u32 count) -{ - __ide_mm_outsw(port, addr, count); -} - -inline void sii_outl (u32 value, unsigned long port) -{ - writel(value, port); -} - -inline void sii_outsl (unsigned long port, void *addr, u32 count) -{ - __ide_mm_outsw(port, addr, (count)<<1); -} - #if defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS) #include #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/sis5513.c linux.21pre4-ac2/drivers/ide/pci/sis5513.c --- linux.21pre4/drivers/ide/pci/sis5513.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/sis5513.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/sis5513.c Version 0.14ac Sept 11, 2002 + * linux/drivers/ide/pci/sis5513.c Version 0.14ac Sept 11, 2002 * * Copyright (C) 1999-2000 Andre Hedrick * Copyright (C) 2002 Lionel Bouton , Maintainer @@ -18,6 +18,10 @@ * ATA100 tests and design on the SiS735/5513 chipset. * ATA16/33 support from specs * ATA133 support for SiS961/962 by L.C. Chang + * + * Documentation: + * SiS chipset documentation available under NDA to companies not + * individuals only. */ /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/sl82c105.c linux.21pre4-ac2/drivers/ide/pci/sl82c105.c --- linux.21pre4/drivers/ide/pci/sl82c105.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/sl82c105.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/sl82c105.c + * linux/drivers/ide/pci/sl82c105.c * * SL82C105/Winbond 553 IDE driver * @@ -7,6 +7,10 @@ * * Drive tuning added from Rebel.com's kernel sources * -- Russell King (15/11/98) linux@arm.linux.org.uk + * + * Merge in Russell's HW workarounds, fix various problems + * with the timing registers setup. + * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org */ #include @@ -28,6 +32,24 @@ #include "ide_modes.h" #include "sl82c105.h" +#undef DEBUG + +#ifdef DEBUG +#define DBG(arg) printk arg +#else +#define DBG(fmt,...) +#endif +/* + * SL82C105 PCI config register 0x40 bits. 
+ */ +#define CTRL_IDE_IRQB (1 << 30) +#define CTRL_IDE_IRQA (1 << 28) +#define CTRL_LEGIRQ (1 << 11) +#define CTRL_P1F16 (1 << 5) +#define CTRL_P1EN (1 << 4) +#define CTRL_P0F16 (1 << 1) +#define CTRL_P0EN (1 << 0) + /* * Convert a PIO mode and cycle time to the required on/off * times for the interface. This has protection against run-away @@ -57,7 +79,7 @@ /* * Configure the drive and chipset for PIO */ -static void config_for_pio(ide_drive_t *drive, int pio, int report) +static void config_for_pio(ide_drive_t *drive, int pio, int report, int chipset_only) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; @@ -65,14 +87,20 @@ u16 drv_ctrl = 0x909; unsigned int xfer_mode, reg; + DBG(("config_for_pio(drive:%s, pio:%d, report:%d, chipset_only:%d)\n", + drive->name, pio, report, chipset_only)); + reg = (hwif->channel ? 0x4c : 0x44) + (drive->select.b.unit ? 4 : 0); pio = ide_get_best_pio_mode(drive, pio, 5, &p); xfer_mode = XFER_PIO_0 + pio; - if (ide_config_drive_speed(drive, xfer_mode) == 0) + if (chipset_only || ide_config_drive_speed(drive, xfer_mode) == 0) { drv_ctrl = get_timing_sl82c105(&p); + drive->pio_speed = xfer_mode; + } else + drive->pio_speed = XFER_PIO_0; if (drive->using_dma == 0) { /* @@ -96,15 +124,16 @@ { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; - u16 drv_ctrl = 0x909; unsigned int reg; + DBG(("config_for_dma(drive:%s)\n", drive->name)); + reg = (hwif->channel ? 0x4c : 0x44) + (drive->select.b.unit ? 4 : 0); - if (ide_config_drive_speed(drive, XFER_MW_DMA_2) == 0) - drv_ctrl = 0x0240; + if (ide_config_drive_speed(drive, XFER_MW_DMA_2) != 0) + return 1; - pci_write_config_word(dev, reg, drv_ctrl); + pci_write_config_word(dev, reg, 0x0240); return 0; } @@ -117,6 +146,9 @@ static int sl82c105_check_drive (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); + + DBG(("sl82c105_check_drive(drive:%s)\n", drive->name)); + do { struct hd_driveid *id = drive->id; @@ -143,34 +175,188 @@ return hwif->ide_dma_off_quietly(drive); } +/* + * The SL82C105 holds off all IDE interrupts while in DMA mode until + * all DMA activity is completed. Sometimes this causes problems (eg, + * when the drive wants to report an error condition). + * + * 0x7e is a "chip testing" register. Bit 2 resets the DMA controller + * state machine. We need to kick this to work around various bugs. + */ +static inline void sl82c105_reset_host(struct pci_dev *dev) +{ + u16 val; + + pci_read_config_word(dev, 0x7e, &val); + pci_write_config_word(dev, 0x7e, val | (1 << 2)); + pci_write_config_word(dev, 0x7e, val & ~(1 << 2)); +} + +/* + * If we get an IRQ timeout, it might be that the DMA state machine + * got confused. Fix from Todd Inglett. Details from Winbond. + * + * This function is called when the IDE timer expires, the drive + * indicates that it is READY, and we were waiting for DMA to complete. + */ +static int sl82c105_ide_dma_lost_irq(ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; + unsigned long dma_base = hwif->dma_base; + + printk("sl82c105: lost IRQ: resetting host\n"); + + /* + * Check the raw interrupt from the drive. + */ + pci_read_config_dword(dev, 0x40, &val); + if (val & mask) + printk("sl82c105: drive was requesting IRQ, but host lost it\n"); + + /* + * Was DMA enabled? If so, disable it - we're resetting the + * host. The IDE layer will be handling the drive for us. 
+ */ + val = hwif->INB(dma_base); + if (val & 1) { + outb(val & ~1, dma_base); + printk("sl82c105: DMA was enabled\n"); + } + + sl82c105_reset_host(dev); + + /* ide_dmaproc would return 1, so we do as well */ + return 1; +} + +/* + * ATAPI devices can cause the SL82C105 DMA state machine to go gaga. + * Winbond recommend that the DMA state machine is reset prior to + * setting the bus master DMA enable bit. + * + * The generic IDE core will have disabled the BMEN bit before this + * function is called. + */ +static int sl82c105_ide_dma_begin(ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + +// DBG(("sl82c105_ide_dma_begin(drive:%s)\n", drive->name)); + + sl82c105_reset_host(dev); + return __ide_dma_begin(drive); +} + +static int sl82c105_ide_dma_timeout(ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + + DBG(("sl82c105_ide_dma_timeout(drive:%s)\n", drive->name)); + + sl82c105_reset_host(dev); + return __ide_dma_timeout(drive); +} + static int sl82c105_ide_dma_on (ide_drive_t *drive) { + DBG(("sl82c105_ide_dma_on(drive:%s)\n", drive->name)); + if (config_for_dma(drive)) { - config_for_pio(drive, 4, 0); + config_for_pio(drive, 4, 0, 0); return HWIF(drive)->ide_dma_off_quietly(drive); } + printk(KERN_INFO "%s: DMA enabled\n", drive->name); return __ide_dma_on(drive); } static int sl82c105_ide_dma_off (ide_drive_t *drive) { - config_for_pio(drive, 4, 0); - return __ide_dma_off(drive); + u8 speed = XFER_PIO_0; + int rc; + + DBG(("sl82c105_ide_dma_off(drive:%s)\n", drive->name)); + + rc = __ide_dma_off(drive); + if (drive->pio_speed) + speed = drive->pio_speed - XFER_PIO_0; + config_for_pio(drive, speed, 0, 1); + drive->current_speed = drive->pio_speed; + + return rc; } static int sl82c105_ide_dma_off_quietly (ide_drive_t *drive) { - config_for_pio(drive, 4, 0); - return __ide_dma_off_quietly(drive); + u8 speed = XFER_PIO_0; + int rc; + + DBG(("sl82c105_ide_dma_off_quietly(drive:%s)\n", drive->name)); + + rc = __ide_dma_off_quietly(drive); + if (drive->pio_speed) + speed = drive->pio_speed - XFER_PIO_0; + config_for_pio(drive, speed, 0, 1); + drive->current_speed = drive->pio_speed; + + return rc; +} + +/* + * Ok, that is nasty, but we must make sure the DMA timings + * won't be used for a PIO access. The solution here is + * to make sure the 16 bits mode is diabled on the channel + * when DMA is enabled, thus causing the chip to use PIO0 + * timings for those operations. + */ +static void sl82c105_selectproc(ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + u32 val, old, mask; + + //DBG(("sl82c105_selectproc(drive:%s)\n", drive->name)); + + mask = hwif->channel ? CTRL_P1F16 : CTRL_P0F16; + old = val = *((u32 *)&hwif->hwif_data); + if (drive->using_dma) + val &= ~mask; + else + val |= mask; + if (old != val) { + pci_write_config_dword(dev, 0x40, val); + *((u32 *)&hwif->hwif_data) = val; + } } /* + * ATA reset will clear the 16 bits mode in the control + * register, we need to update our cache + */ +static void sl82c105_resetproc(ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + u32 val; + + DBG(("sl82c105_resetproc(drive:%s)\n", drive->name)); + + pci_read_config_dword(dev, 0x40, &val); + *((u32 *)&hwif->hwif_data) = val; +} + +/* * We only deal with PIO mode here - DMA mode 'using_dma' is not * initialised at the point that this function is called. 
*/ static void tune_sl82c105(ide_drive_t *drive, u8 pio) { - config_for_pio(drive, pio, 1); + DBG(("tune_sl82c105(drive:%s)\n", drive->name)); + + config_for_pio(drive, pio, 1, 0); /* * We support 32-bit I/O on this interface, and it @@ -189,16 +375,20 @@ struct pci_dev *bridge; u8 rev; - bridge = pci_find_device(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, NULL); - /* - * If we are part of a Winbond 553 + * The bridge should be part of the same device, but function 0. */ - if (!bridge || bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) + bridge = pci_find_slot(dev->bus->number, + PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + if (!bridge) return -1; - if (bridge->bus != dev->bus || - PCI_SLOT(bridge->devfn) != PCI_SLOT(dev->devfn)) + /* + * Make sure it is a Winbond 553 and is an ISA bridge. + */ + if (bridge->vendor != PCI_VENDOR_ID_WINBOND || + bridge->device != PCI_DEVICE_ID_WINBOND_83C553 || + bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) return -1; /* @@ -211,16 +401,21 @@ /* * Enable the PCI device + * + * --BenH: It's arch fixup code that should enable channels that + * have not been enabled by firmware. I decided we can still enable + * channel 0 here at least, but channel 1 has to be enabled by + * firmware or arch code. We still set both to 16 bits mode. */ static unsigned int __init init_chipset_sl82c105(struct pci_dev *dev, const char *msg) { - u8 ctrl_stat; + u32 val; - /* - * Enable the ports - */ - pci_read_config_byte(dev, 0x40, &ctrl_stat); - pci_write_config_byte(dev, 0x40, ctrl_stat | 0x33); + DBG(("init_chipset_sl82c105()\n")); + + pci_read_config_dword(dev, 0x40, &val); + val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; + pci_write_config_dword(dev, 0x40, val); return dev->irq; } @@ -230,6 +425,8 @@ unsigned int rev; u8 dma_state; + DBG(("init_dma_sl82c105(hwif: ide%d, dma_base: 0x%08x)\n", hwif->index, dma_base)); + hwif->autodma = 0; if (!dma_base) @@ -238,8 +435,6 @@ dma_state = hwif->INB(dma_base + 2); rev = sl82c105_bridge_revision(hwif->pci_dev); if (rev <= 5) { - hwif->drives[0].autotune = 1; - hwif->drives[1].autotune = 1; printk(" %s: Winbond 553 bridge revision %d, BM-DMA disabled\n", hwif->name, rev); dma_state &= ~0x60; @@ -259,8 +454,28 @@ static void __init init_hwif_sl82c105(ide_hwif_t *hwif) { - hwif->tuneproc = tune_sl82c105; + struct pci_dev *dev = hwif->pci_dev; + u32 val; + + DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index)); + hwif->tuneproc = tune_sl82c105; + hwif->selectproc = sl82c105_selectproc; + hwif->resetproc = sl82c105_resetproc; + + /* Default to PIO 0 for fallback unless tuned otherwise, + * we always autotune PIO, this is done before DMA is + * checked, so there is no risk of accidentally disabling + * DMA + */ + hwif->drives[0].pio_speed = XFER_PIO_0; + hwif->drives[0].autotune = 1; + hwif->drives[1].pio_speed = XFER_PIO_1; + hwif->drives[1].autotune = 1; + + pci_read_config_dword(dev, 0x40, &val); + *((u32 *)&hwif->hwif_data) = val; + if (!hwif->dma_base) return; @@ -273,6 +488,10 @@ hwif->ide_dma_on = &sl82c105_ide_dma_on; hwif->ide_dma_off = &sl82c105_ide_dma_off; hwif->ide_dma_off_quietly = &sl82c105_ide_dma_off_quietly; + hwif->ide_dma_lostirq = &sl82c105_ide_dma_lost_irq; + hwif->ide_dma_begin = &sl82c105_ide_dma_begin; + hwif->ide_dma_timeout = &sl82c105_ide_dma_timeout; + if (!noautodma) hwif->autodma = 1; hwif->drives[0].autodma = hwif->autodma; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/slc90e66.c linux.21pre4-ac2/drivers/ide/pci/slc90e66.c --- 
linux.21pre4/drivers/ide/pci/slc90e66.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/slc90e66.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/slc90e66.c Version 0.11 September 11, 2002 + * linux/drivers/ide/pci/slc90e66.c Version 0.11 September 11, 2002 * * Copyright (C) 2000-2002 Andre Hedrick * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/triflex.c linux.21pre4-ac2/drivers/ide/pci/triflex.c --- linux.21pre4/drivers/ide/pci/triflex.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/triflex.c 2003-01-11 23:47:56.000000000 +0000 @@ -22,6 +22,9 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Loosely based on the piix & svwks drivers. + * + * Documentation: + * Not publically available. */ #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/trm290.c linux.21pre4-ac2/drivers/ide/pci/trm290.c --- linux.21pre4/drivers/ide/pci/trm290.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/trm290.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/trm290.c Version 1.02 Mar. 18, 2000 + * linux/drivers/ide/pci/trm290.c Version 1.02 Mar. 18, 2000 * * Copyright (c) 1997-1998 Mark Lord * May be copied or modified under the terms of the GNU General Public License diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/pci/via82cxxx.c linux.21pre4-ac2/drivers/ide/pci/via82cxxx.c --- linux.21pre4/drivers/ide/pci/via82cxxx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/pci/via82cxxx.c 2003-01-11 23:44:37.000000000 +0000 @@ -24,6 +24,10 @@ * Michel Aubry * Jeff Garzik * Andre Hedrick + * + * Documentation: + * Obsolete device documentation publically available from via.com.tw + * Current device documentation available under NDA only */ /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ppc/mpc8xx.c linux.21pre4-ac2/drivers/ide/ppc/mpc8xx.c --- linux.21pre4/drivers/ide/ppc/mpc8xx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ppc/mpc8xx.c 2003-01-28 16:28:01.000000000 +0000 @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/ide-m8xx.c + * linux/drivers/ide/ppc/ide-m8xx.c * * Copyright (C) 2000, 2001 Wolfgang Denk, wd@denx.de * Modified for direct IDE interface diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/ppc/pmac.c linux.21pre4-ac2/drivers/ide/ppc/pmac.c --- linux.21pre4/drivers/ide/ppc/pmac.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/ppc/pmac.c 2003-02-04 14:57:08.000000000 +0000 @@ -5,7 +5,7 @@ * These IDE interfaces are memory-mapped and have a DBDMA channel * for doing DMA. * - * Copyright (C) 1998-2001 Paul Mackerras & Ben. Herrenschmidt + * Copyright (C) 1998-2002 Paul Mackerras & Ben. 
Herrenschmidt * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -43,21 +43,22 @@ #include #endif #include "ide_modes.h" +#include "ide-timing.h" extern void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq); -#define IDE_PMAC_DEBUG - +#undef IDE_PMAC_DEBUG #define DMA_WAIT_TIMEOUT 500 typedef struct pmac_ide_hwif { ide_ioreg_t regbase; + unsigned long mapbase; int irq; int kind; int aapl_bus_id; + int cable_80; struct device_node* node; - u32 timings[2]; - int index; + u32 timings[4]; #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC /* Those fields are duplicating what is in hwif. We currently * can't use the hwif ones because of some assumptions that are @@ -82,7 +83,15 @@ controller_heathrow, /* Heathrow/Paddington */ controller_kl_ata3, /* KeyLargo ATA-3 */ controller_kl_ata4, /* KeyLargo ATA-4 */ - controller_kl_ata4_80 /* KeyLargo ATA-4 with 80 conductor cable */ + controller_un_ata6 /* UniNorth2 ATA-6 */ +}; + +static const char* model_name[] = { + "OHare ATA", /* OHare based */ + "Heathrow ATA", /* Heathrow/Paddington */ + "KeyLargo ATA-3", /* KeyLargo ATA-3 */ + "KeyLargo ATA-4", /* KeyLargo ATA-4 */ + "UniNorth ATA-6" /* UniNorth2 ATA-6 */ }; /* @@ -91,6 +100,11 @@ #define IDE_TIMING_CONFIG 0x200 #define IDE_INTERRUPT 0x300 +/* Kauai (U2) ATA has different register setup */ +#define IDE_KAUAI_PIO_CONFIG 0x200 +#define IDE_KAUAI_ULTRA_CONFIG 0x210 +#define IDE_KAUAI_POLL_CONFIG 0x220 + /* * Timing configuration register definitions */ @@ -101,6 +115,28 @@ #define IDE_SYSCLK_NS 30 /* 33Mhz cell */ #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */ +/* 100Mhz cell, found in Uninorth 2. I don't have much infos about + * this one yet, it appears as a pci device (106b/0033) on uninorth + * internal PCI bus and it's clock is controlled like gem or fw. It + * appears to be an evolution of keylargo ATA4 with a timing register + * extended to 2 32bits registers and a similar DBDMA channel. Other + * registers seem to exist but I can't tell much about them. + * + * So far, I'm using pre-calculated tables for this extracted from + * the values used by the MacOS X driver. + * + * The "PIO" register controls PIO and MDMA timings, the "ULTRA" + * register controls the UDMA timings. At least, it seems bit 0 + * of this one enables UDMA vs. MDMA, and bits 4..7 are the + * cycle time in units of 10ns. Bits 8..15 are used by I don't + * know their meaning yet + */ +#define TR_100_PIOREG_PIO_MASK 0xff000fff +#define TR_100_PIOREG_MDMA_MASK 0x00fff000 +#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff +#define TR_100_UDMAREG_UDMA_EN 0x00000001 + + /* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on * 40 connector cable and to 4 on 80 connector one. * Clock unit is 15ns (66Mhz) @@ -115,8 +151,7 @@ * well, despite a comment that would lead to think it has a * min value of 45ns. * Apple also add 60ns to the write data setup (or cycle time ?) on - * reads. I can't explain that, I tried it and it broke everything - * here. + * reads. */ #define TR_66_UDMA_MASK 0xfff00000 #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */ @@ -220,12 +255,12 @@ { 0, 0, 0 } }; -/* Ultra DMA timings (rounded) */ +/* KeyLargo ATA-4 Ultra DMA timings (rounded) */ struct { int addrSetup; /* ??? 
*/ int rdy2pause; int wrDataSetup; -} udma_timings[] __pmacdata = +} kl66_udma_timings[] __pmacdata = { { 0, 180, 120 }, /* Mode 0 */ { 0, 150, 90 }, /* 1 */ @@ -234,6 +269,63 @@ { 0, 90, 30 } /* 4 */ }; +/* UniNorth 2 ATA/100 timings */ +struct kauai_timing { + int cycle_time; + u32 timing_reg; +}; + +static struct kauai_timing kauai_pio_timings[] __pmacdata = +{ + { 930 , 0x08000fff }, + { 600 , 0x08000a92 }, + { 383 , 0x0800060f }, + { 360 , 0x08000492 }, + { 330 , 0x0800048f }, + { 300 , 0x080003cf }, + { 270 , 0x080003cc }, + { 240 , 0x0800038b }, + { 239 , 0x0800030c }, + { 180 , 0x05000249 }, + { 120 , 0x04000148 } +}; + +static struct kauai_timing kauai_mdma_timings[] __pmacdata = +{ + { 1260 , 0x00fff000 }, + { 480 , 0x00618000 }, + { 360 , 0x00492000 }, + { 270 , 0x0038e000 }, + { 240 , 0x0030c000 }, + { 210 , 0x002cb000 }, + { 180 , 0x00249000 }, + { 150 , 0x00209000 }, + { 120 , 0x00148000 }, + { 0 , 0 }, +}; + +static struct kauai_timing kauai_udma_timings[] __pmacdata = +{ + { 120 , 0x000070c0 }, + { 90 , 0x00005d80 }, + { 60 , 0x00004a60 }, + { 45 , 0x00003a50 }, + { 30 , 0x00002a30 }, + { 20 , 0x00002921 }, + { 0 , 0 }, +}; + +static inline u32 +kauai_lookup_timing(struct kauai_timing* table, int cycle_time) +{ + int i; + + for (i=0; table[i].cycle_time; i++) + if (cycle_time > table[i+1].cycle_time) + return table[i].timing_reg; + return 0; +} + /* allow up to 256 DBDMA commands per xfer */ #define MAX_DCMDS 256 @@ -242,14 +334,18 @@ * NOTE: There is at least one case I know of a disk that needs about 10sec * before anwering on the bus. I beleive we could add a kernel command * line arg to override this delay for such cases. + * + * NOTE2: This has to be fixed with a BSY wait loop. I'm working on adding + * that to the generic probe code. */ #define IDE_WAKEUP_DELAY_MS 2000 static void pmac_ide_setup_dma(struct device_node *np, int ix); -static int pmac_ide_build_dmatable(ide_drive_t *drive, int wr); +static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq, int ddir); static int pmac_ide_tune_chipset(ide_drive_t *drive, u8 speed); static void pmac_ide_tuneproc(ide_drive_t *drive, u8 pio); static void pmac_ide_selectproc(ide_drive_t *drive); +static void pmac_ide_kauai_selectproc(ide_drive_t *drive); static int pmac_ide_dma_begin (ide_drive_t *drive); #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ @@ -315,6 +411,41 @@ (void)readl((unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG)); } +static void __pmac +pmac_ide_kauai_selectproc(ide_drive_t *drive) +{ + pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; + + if (pmif == NULL) + return; + + if (drive->select.b.unit & 0x01) { + writel(pmif->timings[1], + (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG)); + writel(pmif->timings[3], + (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG)); + } else { + writel(pmif->timings[0], + (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG)); + writel(pmif->timings[2], + (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG)); + } + (void)readl((unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG)); +} + +static void __pmac +pmac_ide_do_update_timings(ide_drive_t *drive) +{ + pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; + + if (pmif == NULL) + return; + + if (pmif->kind == controller_un_ata6) + pmac_ide_kauai_selectproc(drive); + else + pmac_ide_selectproc(drive); +} static int __pmac pmac_ide_do_setfeature(ide_drive_t *drive, u8 command) @@ -403,21 +534,27 @@ if (pmif == NULL) return; + /* which drive is it ? 
*/ + timings = &pmif->timings[drive->select.b.unit & 0x01]; + pio = ide_get_best_pio_mode(drive, pio, 4, &d); - accessTicks = SYSCLK_TICKS(ide_pio_timings[pio].active_time); - if (drive->select.b.unit & 0x01) - timings = &pmif->timings[1]; - else - timings = &pmif->timings[0]; - recTime = d.cycle_time - ide_pio_timings[pio].active_time - - ide_pio_timings[pio].setup_time; - recTime = max(recTime, 150U); - accessTime = ide_pio_timings[pio].active_time; - accessTime = max(accessTime, 150U); - if (pmif->kind == controller_kl_ata4 || - pmif->kind == controller_kl_ata4_80) { + switch (pmif->kind) { + case controller_un_ata6: { + /* 100Mhz cell */ + u32 tr = kauai_lookup_timing(kauai_pio_timings, d.cycle_time); + if (tr == 0) + return; + *timings = ((*timings) & ~TR_100_PIOREG_PIO_MASK) | tr; + break; + } + case controller_kl_ata4: /* 66Mhz cell */ + recTime = d.cycle_time - ide_pio_timings[pio].active_time + - ide_pio_timings[pio].setup_time; + recTime = max(recTime, 150U); + accessTime = ide_pio_timings[pio].active_time; + accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); recTicks = SYSCLK_TICKS_66(recTime); @@ -425,9 +562,15 @@ *timings = ((*timings) & ~TR_66_PIO_MASK) | (accessTicks << TR_66_PIO_ACCESS_SHIFT) | (recTicks << TR_66_PIO_RECOVERY_SHIFT); - } else { + break; + default: { /* 33Mhz cell */ int ebit = 0; + recTime = d.cycle_time - ide_pio_timings[pio].active_time + - ide_pio_timings[pio].setup_time; + recTime = max(recTime, 150U); + accessTime = ide_pio_timings[pio].active_time; + accessTime = max(accessTime, 150U); accessTicks = SYSCLK_TICKS(accessTime); accessTicks = min(accessTicks, 0x1fU); accessTicks = max(accessTicks, 4U); @@ -443,6 +586,8 @@ (recTicks << TR_33_PIO_RECOVERY_SHIFT); if (ebit) *timings |= TR_33_PIO_E; + break; + } } #ifdef IDE_PMAC_DEBUG @@ -451,18 +596,21 @@ #endif if (drive->select.all == HWIF(drive)->INB(IDE_SELECT_REG)) - pmac_ide_selectproc(drive); + pmac_ide_do_update_timings(drive); } #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC static int __pmac -set_timings_udma(u32 *timings, u8 speed) +set_timings_udma_ata4(u32 *timings, u8 speed) { unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks; - rdyToPauseTicks = SYSCLK_TICKS_66(udma_timings[speed & 0xf].rdy2pause); - wrDataSetupTicks = SYSCLK_TICKS_66(udma_timings[speed & 0xf].wrDataSetup); - addrTicks = SYSCLK_TICKS_66(udma_timings[speed & 0xf].addrSetup); + if (speed > XFER_UDMA_4) + return 1; + + rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause); + wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup); + addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup); *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) | (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) | @@ -478,11 +626,28 @@ } static int __pmac -set_timings_mdma(int intf_type, u32 *timings, u8 speed, int drive_cycle_time) +set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed) +{ + struct ide_timing *t = ide_timing_find_mode(speed); + u32 tr; + + if (speed > XFER_UDMA_5 || t == NULL) + return 1; + tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma); + if (tr == 0) + return 1; + *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr; + *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN; + + return 0; +} + +static int __pmac +set_timings_mdma(int intf_type, u32 *timings, u32 *timings2, u8 speed, int drive_cycle_time) { int cycleTime, accessTime, recTime; unsigned 
accessTicks, recTicks; - struct mdma_timings_t* tm; + struct mdma_timings_t* tm = NULL; int i; /* Get default cycle time for mode */ @@ -491,7 +656,7 @@ case 1: cycleTime = 150; break; case 2: cycleTime = 120; break; default: - return -1; + return 1; } /* Adjust for drive */ if (drive_cycle_time && drive_cycle_time > cycleTime) @@ -501,8 +666,9 @@ cycleTime = 150; /* Get the proper timing array for this controller */ switch(intf_type) { + case controller_un_ata6: + break; case controller_kl_ata4: - case controller_kl_ata4_80: tm = mdma_timings_66; break; case controller_kl_ata3: @@ -512,24 +678,36 @@ tm = mdma_timings_33; break; } - /* Lookup matching access & recovery times */ - i = -1; - for (;;) { - if (tm[i+1].cycleTime < cycleTime) - break; - i++; - } - if (i < 0) - return -1; - cycleTime = tm[i].cycleTime; - accessTime = tm[i].accessTime; - recTime = tm[i].recoveryTime; + if (tm != NULL) { + /* Lookup matching access & recovery times */ + i = -1; + for (;;) { + if (tm[i+1].cycleTime < cycleTime) + break; + i++; + } + if (i < 0) + return 1; + cycleTime = tm[i].cycleTime; + accessTime = tm[i].accessTime; + recTime = tm[i].recoveryTime; #ifdef IDE_PMAC_DEBUG - printk(KERN_ERR "ide_pmac: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n", - cycleTime, accessTime, recTime); -#endif - if (intf_type == controller_kl_ata4 || intf_type == controller_kl_ata4_80) { + printk(KERN_ERR "ide_pmac: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n", + cycleTime, accessTime, recTime); +#endif + } + switch(intf_type) { + case controller_un_ata6: { + /* 100Mhz cell */ + u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime); + if (tr == 0) + return 1; + *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr; + *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN; + } + break; + case controller_kl_ata4: /* 66Mhz cell */ accessTicks = SYSCLK_TICKS_66(accessTime); accessTicks = min(accessTicks, 0x1fU); @@ -541,7 +719,8 @@ *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) | (accessTicks << TR_66_MDMA_ACCESS_SHIFT) | (recTicks << TR_66_MDMA_RECOVERY_SHIFT); - } else if (intf_type == controller_kl_ata3) { + break; + case controller_kl_ata3: /* 33Mhz cell on KeyLargo */ accessTicks = SYSCLK_TICKS(accessTime); accessTicks = max(accessTicks, 1U); @@ -553,7 +732,8 @@ *timings = ((*timings) & ~TR_33_MDMA_MASK) | (accessTicks << TR_33_MDMA_ACCESS_SHIFT) | (recTicks << TR_33_MDMA_RECOVERY_SHIFT); - } else { + break; + default: { /* 33Mhz cell on others */ int halfTick = 0; int origAccessTime = accessTime; @@ -578,6 +758,7 @@ (recTicks << TR_33_MDMA_RECOVERY_SHIFT); if (halfTick) *timings |= TR_33_MDMA_HALFTICK; + } } #ifdef IDE_PMAC_DEBUG printk(KERN_ERR "ide_pmac: Set MDMA timing for mode %d, reg: 0x%08x\n", @@ -591,36 +772,42 @@ * our, normal mdma function is supposed to be more precise */ static int __pmac -pmac_ide_tune_chipset (ide_drive_t *drive, u8 speed) +pmac_ide_tune_chipset (ide_drive_t *drive, byte speed) { int unit = (drive->select.b.unit & 0x01); int ret = 0; pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; - u32 *timings; - + u32 *timings, *timings2; + if (pmif == NULL) return 1; - + timings = &pmif->timings[unit]; + timings2 = &pmif->timings[unit+2]; switch(speed) { #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC + case XFER_UDMA_5: + if (pmif->kind != controller_un_ata6) + return 1; case XFER_UDMA_4: case XFER_UDMA_3: - if (pmif->kind != controller_kl_ata4_80) + if (HWIF(drive)->udma_four == 0) return 1; case XFER_UDMA_2: case XFER_UDMA_1: case XFER_UDMA_0: - if (pmif->kind != 
controller_kl_ata4 && - pmif->kind != controller_kl_ata4_80) - return 1; - ret = set_timings_udma(timings, speed); + if (pmif->kind == controller_kl_ata4) + ret = set_timings_udma_ata4(timings, speed); + else if (pmif->kind == controller_un_ata6) + ret = set_timings_udma_ata6(timings, timings2, speed); + else + ret = 1; break; case XFER_MW_DMA_2: case XFER_MW_DMA_1: case XFER_MW_DMA_0: - ret = set_timings_mdma(pmif->kind, timings, speed, 0); + ret = set_timings_mdma(pmif->kind, timings, timings2, speed, 0); break; case XFER_SW_DMA_2: case XFER_SW_DMA_1: @@ -644,7 +831,7 @@ if (ret) return ret; - pmac_ide_selectproc(drive); + pmac_ide_do_update_timings(drive); drive->current_speed = speed; return 0; @@ -653,11 +840,14 @@ static void __pmac sanitize_timings(pmac_ide_hwif_t *pmif) { - unsigned value; + unsigned int value, value2 = 0; switch(pmif->kind) { + case controller_un_ata6: + value = 0x08618a92; + value2 = 0x00002921; + break; case controller_kl_ata4: - case controller_kl_ata4_80: value = 0x0008438c; break; case controller_kl_ata3: @@ -670,6 +860,7 @@ break; } pmif->timings[0] = pmif->timings[1] = value; + pmif->timings[2] = pmif->timings[3] = value2; } ide_ioreg_t __pmac @@ -689,6 +880,17 @@ return -1; } +struct device_node* +pmac_ide_get_of_node(int index) +{ + /* Don't access the pmac_ide array on non-pmac */ + if (pmac_ide_count == 0) + return NULL; + if (pmac_ide[index].regbase == 0) + return NULL; + return pmac_ide[index].node; +} + int __pmac pmac_ide_get_irq(ide_ioreg_t base) { @@ -731,7 +933,7 @@ int i; struct device_node *atas; struct device_node *p, **pp, *removables, **rp; - unsigned long base; + unsigned long base, regbase; int irq, big_delay; ide_hwif_t *hwif; @@ -772,29 +974,89 @@ /* * If this node is not under a mac-io or dbdma node, - * leave it to the generic PCI driver. + * leave it to the generic PCI driver. Except for U2's + * Kauai ATA */ - for (tp = np->parent; tp != 0; tp = tp->parent) - if (tp->type && (strcmp(tp->type, "mac-io") == 0 - || strcmp(tp->type, "dbdma") == 0)) - break; - if (tp == 0) - continue; + if (!device_is_compatible(np, "kauai-ata")) { + for (tp = np->parent; tp != 0; tp = tp->parent) + if (tp->type && (strcmp(tp->type, "mac-io") == 0 + || strcmp(tp->type, "dbdma") == 0)) + break; + if (tp == 0) + continue; - if (np->n_addrs == 0) { - printk(KERN_WARNING "ide: no address for device %s\n", - np->full_name); - continue; - } + if (np->n_addrs == 0) { + printk(KERN_WARNING "ide-pmac: no address for device %s\n", + np->full_name); + continue; + } + /* We need to find the pci_dev of the mac-io holding the + * IDE interface + */ + if (pci_device_from_OF_node(tp, &pbus, &pid) == 0) + pdev = pci_find_slot(pbus, pid); - /* We need to find the pci_dev of the mac-io holding the - * IDE interface - */ - if (pci_device_from_OF_node(tp, &pbus, &pid) == 0) - pdev = pci_find_slot(pbus, pid); - if (pdev == NULL) - printk(KERN_WARNING "ide: no PCI host for device %s, DMA disabled\n", - np->full_name); + if (pdev == NULL) + printk(KERN_WARNING "ide-pmac: no PCI host for device %s, DMA disabled\n", + np->full_name); + /* + * Some older OFs have bogus sizes, causing request_OF_resource + * to fail. 
We fix them up here + */ + if (np->addrs[0].size > 0x1000) + np->addrs[0].size = 0x1000; + if (np->n_addrs > 1 && np->addrs[1].size > 0x100) + np->addrs[1].size = 0x100; + + if (request_OF_resource(np, 0, " (mac-io IDE IO)") == NULL) { + printk(KERN_ERR "ide-pmac(%s): can't request IO resource !\n", np->name); + continue; + } + + base = (unsigned long) ioremap(np->addrs[0].address, 0x400); + regbase = base; + + /* XXX This is bogus. Should be fixed in the registry by checking + the kind of host interrupt controller, a bit like gatwick + fixes in irq.c + */ + if (np->n_intrs == 0) { + printk(KERN_WARNING "ide-pmac: no intrs for device %s, using 13\n", + np->full_name); + irq = 13; + } else { + irq = np->intrs[0].line; + } + } else { + unsigned long rbase, rlen; + + if (pci_device_from_OF_node(np, &pbus, &pid) == 0) + pdev = pci_find_slot(pbus, pid); + if (pdev == NULL) { + printk(KERN_WARNING "ide-pmac: no PCI host for device %s, skipping\n", + np->full_name); + continue; + } + if (pci_enable_device(pdev)) { + printk(KERN_WARNING "ide-pmac: Can't enable PCI device for %s, skipping\n", + np->full_name); + continue; + } + pci_set_master(pdev); + + if (pci_request_regions(pdev, "U2 IDE")) { + printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources\n"); + continue; + } + + rbase = pci_resource_start(pdev, 0); + rlen = pci_resource_len(pdev, 0); + + base = (unsigned long) ioremap(rbase, rlen); + regbase = base + 0x2000; + + irq = pdev->irq; + } /* * If this slot is taken (e.g. by ide-pci.c) try the next one. @@ -806,38 +1068,15 @@ break; pmif = &pmac_ide[i]; - /* - * Some older OFs have bogus sizes, causing request_OF_resource - * to fail. We fix them up here - */ - if (np->addrs[0].size > 0x1000) - np->addrs[0].size = 0x1000; - if (np->n_addrs > 1 && np->addrs[1].size > 0x100) - np->addrs[1].size = 0x100; - - if (request_OF_resource(np, 0, " (mac-io IDE IO)") == NULL) { - printk(KERN_ERR "ide-pmac(%s): can't request IO resource !\n", np->name); - continue; - } - - base = (unsigned long) ioremap(np->addrs[0].address, 0x400); - - /* XXX This is bogus. Should be fixed in the registry by checking - the kind of host interrupt controller, a bit like gatwick - fixes in irq.c - */ - if (np->n_intrs == 0) { - printk(KERN_WARNING "ide: no intrs for device %s, using 13\n", - np->full_name); - irq = 13; - } else { - irq = np->intrs[0].line; - } - pmif->regbase = base; + pmif->mapbase = base; + pmif->regbase = regbase; pmif->irq = irq; pmif->node = np; - pmif->index = i; - if (device_is_compatible(np, "keylargo-ata")) { + pmif->cable_80 = 0; + if (device_is_compatible(np, "kauai-ata")) { + pmif->kind = controller_un_ata6; + pci_set_drvdata(pdev, pmif); + } else if (device_is_compatible(np, "keylargo-ata")) { if (strcmp(np->name, "ata-4") == 0) pmif->kind = controller_kl_ata4; else @@ -850,10 +1089,11 @@ bidp = (int *)get_property(np, "AAPL,bus-id", NULL); pmif->aapl_bus_id = bidp ? 
*bidp : 0; - if (pmif->kind == controller_kl_ata4) { + /* Get cable type from device-tree */ + if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6) { char* cable = get_property(np, "cable-type", NULL); if (cable && !strncmp(cable, "80-", 3)) - pmif->kind = controller_kl_ata4_80; + pmif->cable_80 = 1; } /* Make sure we have sane timings */ @@ -862,7 +1102,7 @@ if (np->parent && np->parent->name && strcasecmp(np->parent->name, "media-bay") == 0) { #ifdef CONFIG_PMAC_PBOOK - media_bay_set_ide_infos(np->parent,base,irq,i); + media_bay_set_ide_infos(np->parent,regbase,irq,i); #endif /* CONFIG_PMAC_PBOOK */ in_bay = 1; if (!bidp) @@ -875,8 +1115,6 @@ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); } else { /* This is necessary to enable IDE when net-booting */ - printk(KERN_INFO "pmac_ide: enabling IDE bus ID %d\n", - pmif->aapl_bus_id); ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); mdelay(10); @@ -894,33 +1132,58 @@ memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports)); hwif->chipset = ide_pmac; hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || in_bay; - hwif->udma_four = (pmif->kind == controller_kl_ata4_80); + hwif->udma_four = pmif->cable_80; hwif->pci_dev = pdev; hwif->drives[0].unmask = 1; hwif->drives[1].unmask = 1; hwif->tuneproc = pmac_ide_tuneproc; - hwif->selectproc = pmac_ide_selectproc; + if (pmif->kind == controller_un_ata6) + hwif->selectproc = pmac_ide_kauai_selectproc; + else + hwif->selectproc = pmac_ide_selectproc; hwif->speedproc = pmac_ide_tune_chipset; + + printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s\n", + i, model_name[pmif->kind], pmif->aapl_bus_id, + in_bay ? " (mediabay)" : ""); + #ifdef CONFIG_PMAC_PBOOK - if (in_bay && check_media_bay_by_base(base, MB_CD) == 0) + if (in_bay && check_media_bay_by_base(regbase, MB_CD) == 0) hwif->noprobe = 0; #endif /* CONFIG_PMAC_PBOOK */ #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC - if (np->n_addrs >= 2) { + if (np->n_addrs >= 2 || pmif->kind == controller_un_ata6) { /* has a DBDMA controller channel */ pmac_ide_setup_dma(np, i); } hwif->atapi_dma = 1; - hwif->ultra_mask = 0x1f; - hwif->mwdma_mask = 0x07; - hwif->swdma_mask = 0x07; - + switch(pmif->kind) { + case controller_un_ata6: + hwif->ultra_mask = pmif->cable_80 ? 0x3f : 0x07; + hwif->mwdma_mask = 0x07; + hwif->swdma_mask = 0x00; + break; + case controller_kl_ata4: + hwif->ultra_mask = pmif->cable_80 ? 
0x1f : 0x07; + hwif->mwdma_mask = 0x07; + hwif->swdma_mask = 0x00; + break; + default: + hwif->ultra_mask = 0x00; + hwif->mwdma_mask = 0x07; + hwif->swdma_mask = 0x00; + break; + } #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ ++i; } pmac_ide_count = i; + + if (pmac_ide_count == 0) + return; + if (big_delay) mdelay(IDE_WAKEUP_DELAY_MS); @@ -931,41 +1194,66 @@ #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC -static int +static int __pmac pmac_ide_build_sglist(ide_hwif_t *hwif, struct request *rq, int data_dir) { pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; struct buffer_head *bh; struct scatterlist *sg = pmif->sg_table; + unsigned long lastdataend = ~0UL; int nents = 0; if (hwif->sg_dma_active) BUG(); + pmif->sg_dma_direction = data_dir; + bh = rq->bh; do { - unsigned char *virt_addr = bh->b_data; - unsigned int size = bh->b_size; + int contig = 0; + + if (bh->b_page) { + if (bh_phys(bh) == lastdataend) + contig = 1; + } else { + if ((unsigned long) bh->b_data == lastdataend) + contig = 1; + } + + if (contig) { + sg[nents - 1].length += bh->b_size; + lastdataend += bh->b_size; + continue; + } if (nents >= MAX_DCMDS) return 0; - while ((bh = bh->b_reqnext) != NULL) { - if ((virt_addr + size) != (unsigned char *) bh->b_data) - break; - size += bh->b_size; - } memset(&sg[nents], 0, sizeof(*sg)); - sg[nents].address = virt_addr; - sg[nents].length = size; + + if (bh->b_page) { + sg[nents].page = bh->b_page; + sg[nents].offset = bh_offset(bh); + lastdataend = bh_phys(bh) + bh->b_size; + } else { + if ((unsigned long) bh->b_data < PAGE_SIZE) + BUG(); + + sg[nents].address = bh->b_data; + lastdataend = (unsigned long) bh->b_data + bh->b_size; + } + + sg[nents].length = bh->b_size; nents++; - } while (bh != NULL); + } while ((bh = bh->b_reqnext) != NULL); + + if(nents == 0) + BUG(); - pmif->sg_dma_direction = data_dir; return pci_map_sg(hwif->pci_dev, sg, nents, data_dir); } -static int +static int __pmac pmac_ide_raw_build_sglist(ide_hwif_t *hwif, struct request *rq) { pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; @@ -1000,17 +1288,15 @@ * pmac_ide_build_dmatable builds the DBDMA command list * for a transfer and sets the DBDMA channel to point to it. */ -static int -pmac_ide_build_dmatable(ide_drive_t *drive, int wr) +static int __pmac +pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq, int ddir) { struct dbdma_cmd *table; int i, count = 0; - struct request *rq = HWGROUP(drive)->rq; ide_hwif_t *hwif = HWIF(drive); pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; volatile struct dbdma_regs *dma = pmif->dma_regs; struct scatterlist *sg; - int data_dir; /* DMA table is already aligned */ table = (struct dbdma_cmd *) pmif->dma_table_cpu; @@ -1020,16 +1306,11 @@ while (readl(&dma->status) & RUN) udelay(1); - if (wr) - data_dir = PCI_DMA_TODEVICE; - else - data_dir = PCI_DMA_FROMDEVICE; - /* Build sglist */ if (rq->cmd == IDE_DRIVE_TASKFILE) pmif->sg_nents = i = pmac_ide_raw_build_sglist(hwif, rq); else - pmif->sg_nents = i = pmac_ide_build_sglist(hwif, rq, data_dir); + pmif->sg_nents = i = pmac_ide_build_sglist(hwif, rq, ddir); if (!i) return 0; @@ -1048,9 +1329,10 @@ if (++count >= MAX_DCMDS) { printk(KERN_WARNING "%s: DMA table too small\n", drive->name); - return 0; /* revert to PIO for this request */ + goto use_pio_instead; } - st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE); + st_le16(&table->command, + (ddir == PCI_DMA_TODEVICE) ? 
OUTPUT_MORE: INPUT_MORE); st_le16(&table->req_count, tc); st_le32(&table->phy_addr, cur_addr); table->cmd_dep = 0; @@ -1065,21 +1347,29 @@ } /* convert the last command to an input/output last command */ - if (count) - st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST); - else - printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name); + if (count) { + st_le16(&table[-1].command, + (ddir == PCI_DMA_TODEVICE) ? OUTPUT_LAST: INPUT_LAST); + /* add the stop command to the end of the list */ + memset(table, 0, sizeof(struct dbdma_cmd)); + st_le16(&table->command, DBDMA_STOP); + mb(); + writel(pmif->dma_table_dma, &dma->cmdptr); + return 1; + } - /* add the stop command to the end of the list */ - memset(table, 0, sizeof(struct dbdma_cmd)); - st_le16(&table->command, DBDMA_STOP); - mb(); - writel(pmif->dma_table_dma, &dma->cmdptr); - return 1; + printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name); + use_pio_instead: + pci_unmap_sg(hwif->pci_dev, + pmif->sg_table, + pmif->sg_nents, + pmif->sg_dma_direction); + hwif->sg_dma_active = 0; + return 0; /* revert to PIO for this request */ } /* Teardown mappings after DMA has completed. */ -static void +static void __pmac pmac_ide_destroy_dmatable (ide_drive_t *drive) { struct pci_dev *dev = HWIF(drive)->pci_dev; @@ -1090,67 +1380,25 @@ if (nents) { pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction); pmif->sg_nents = 0; + HWIF(drive)->sg_dma_active = 0; } } -static __inline__ unsigned char -dma_bits_to_command(unsigned char bits) -{ - if(bits & 0x04) - return XFER_MW_DMA_2; - if(bits & 0x02) - return XFER_MW_DMA_1; - if(bits & 0x01) - return XFER_MW_DMA_0; - return 0; -} - -static __inline__ unsigned char -udma_bits_to_command(unsigned char bits, int high_speed) -{ - if (high_speed) { - if(bits & 0x10) - return XFER_UDMA_4; - if(bits & 0x08) - return XFER_UDMA_3; - } - if(bits & 0x04) - return XFER_UDMA_2; - if(bits & 0x02) - return XFER_UDMA_1; - if(bits & 0x01) - return XFER_UDMA_0; - return 0; -} - /* Calculate MultiWord DMA timings */ static int __pmac -pmac_ide_mdma_enable(ide_drive_t *drive) +pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode) { - u8 bits = drive->id->dma_mword & 0x07; - u8 feature = dma_bits_to_command(bits); - u32 *timings; + ide_hwif_t *hwif = HWIF(drive); + pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; int drive_cycle_time; struct hd_driveid *id = drive->id; - pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; + u32 *timings, *timings2; + u32 timing_local[2]; int ret; - /* Set feature on drive */ - printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, feature & 0xf); - ret = pmac_ide_do_setfeature(drive, feature); - if (ret) { - printk(KERN_WARNING "%s: Failed !\n", drive->name); - return 0; - } - - if (!drive->init_speed) - drive->init_speed = feature; - /* which drive is it ? 
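
The reworked pmac_ide_build_sglist above coalesces buffer heads that are physically contiguous with the previous one (tracked in lastdataend) into a single scatterlist entry, and returns 0 once MAX_DCMDS entries are used so the caller can revert to PIO. Below is a minimal user-space sketch of that coalescing idea; the buf/seg types and the build_segments name are illustrative only, not the driver's.

#include <stdio.h>
#include <stddef.h>

struct buf { unsigned long phys; size_t len; };   /* stand-in for a buffer_head */
struct seg { unsigned long phys; size_t len; };   /* stand-in for a scatterlist entry */

/* Coalesce physically contiguous buffers into segments; returns segment count,
 * or 0 when the table would overflow (caller falls back, e.g. to PIO). */
static int build_segments(const struct buf *bufs, int nbufs,
                          struct seg *segs, int max_segs)
{
    unsigned long lastend = ~0UL;    /* end address of the previous segment */
    int nsegs = 0;

    for (int i = 0; i < nbufs; i++) {
        if (bufs[i].phys == lastend) {
            /* contiguous with the previous buffer: grow that segment */
            segs[nsegs - 1].len += bufs[i].len;
        } else {
            if (nsegs >= max_segs)
                return 0;
            segs[nsegs].phys = bufs[i].phys;
            segs[nsegs].len  = bufs[i].len;
            nsegs++;
        }
        lastend = bufs[i].phys + bufs[i].len;
    }
    return nsegs;
}

int main(void)
{
    struct buf b[] = { {0x1000, 512}, {0x1200, 512}, {0x8000, 1024} };
    struct seg s[4];
    int n = build_segments(b, 3, s, 4);   /* first two merge into one segment */

    for (int i = 0; i < n; i++)
        printf("seg %d: phys=0x%lx len=%zu\n", i, s[i].phys, s[i].len);
    return 0;
}
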
*/ - if (drive->select.b.unit & 0x01) - timings = &pmif->timings[1]; - else - timings = &pmif->timings[0]; + timings = &pmif->timings[drive->select.b.unit & 0x01]; + timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2]; /* Check if drive provide explicit cycle time */ if ((id->field_valid & 2) && (id->eide_dma_time)) @@ -1158,54 +1406,95 @@ else drive_cycle_time = 0; + /* Copy timings to local image */ + timing_local[0] = *timings; + timing_local[1] = *timings2; + /* Calculate controller timings */ - set_timings_mdma(pmif->kind, timings, feature, drive_cycle_time); + ret = set_timings_mdma( pmif->kind, + &timing_local[0], + &timing_local[1], + mode, + drive_cycle_time); + if (ret) + return 0; + + /* Set feature on drive */ + printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, mode & 0xf); + ret = pmac_ide_do_setfeature(drive, mode); + if (ret) { + printk(KERN_WARNING "%s: Failed !\n", drive->name); + return 0; + } + + /* Apply timings to controller */ + *timings = timing_local[0]; + *timings2 = timing_local[1]; + + /* Set speed info in drive */ + drive->current_speed = mode; + if (!drive->init_speed) + drive->init_speed = mode; - drive->current_speed = feature; return 1; } /* Calculate Ultra DMA timings */ static int __pmac -pmac_ide_udma_enable(ide_drive_t *drive, int high_speed) +pmac_ide_udma_enable(ide_drive_t *drive, u16 mode) { - u8 bits = drive->id->dma_ultra & 0x1f; - u8 feature = udma_bits_to_command(bits, high_speed); - pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; - u32 *timings; + ide_hwif_t *hwif = HWIF(drive); + pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; + u32 *timings, *timings2; + u32 timing_local[2]; int ret; + + /* which drive is it ? */ + timings = &pmif->timings[drive->select.b.unit & 0x01]; + timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2]; + /* Copy timings to local image */ + timing_local[0] = *timings; + timing_local[1] = *timings2; + + /* Calculate timings for interface */ + if (pmif->kind == controller_un_ata6) + ret = set_timings_udma_ata6( &timing_local[0], + &timing_local[1], + mode); + else + ret = set_timings_udma_ata4(&timing_local[0], mode); + if (ret) + return 0; + /* Set feature on drive */ - printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, feature & 0xf); - ret = pmac_ide_do_setfeature(drive, feature); + printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, mode & 0x0f); + ret = pmac_ide_do_setfeature(drive, mode); if (ret) { printk(KERN_WARNING "%s: Failed !\n", drive->name); return 0; } - if (!drive->init_speed) - drive->init_speed = feature; - - /* which drive is it ? 
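
The rework of pmac_ide_mdma_enable above (and of pmac_ide_udma_enable here) first computes the new timing words into a local copy, programs the drive with SET FEATURES only after the calculation succeeds, and only then copies the result into pmif->timings[], so a rejected mode leaves the controller untouched. A small stand-alone sketch of that compute-into-locals, commit-on-success ordering; the register values and helper names are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Illustrative "controller" state: two timing registers for one drive. */
static uint32_t timings[2] = { 0x0008438c, 0x00000000 };

/* Pretend timing calculation; fails for unsupported modes. */
static int calc_timings(int mode, uint32_t *t0, uint32_t *t1)
{
    if (mode > 2)
        return 1;                   /* unsupported */
    *t0 = 0x00211025u + mode;       /* made-up value, illustration only */
    *t1 = 0;
    return 0;
}

/* Pretend SET FEATURES command sent to the drive. */
static int set_feature_on_drive(int mode) { (void) mode; return 0; }

static int enable_mode(int mode)
{
    uint32_t local[2] = { timings[0], timings[1] };  /* work on a copy */

    if (calc_timings(mode, &local[0], &local[1]))
        return 0;                   /* can't time this mode: leave hw alone */
    if (set_feature_on_drive(mode))
        return 0;                   /* drive refused: leave hw alone */

    timings[0] = local[0];          /* only now commit to the controller image */
    timings[1] = local[1];
    return 1;
}

int main(void)
{
    printf("mode 2 enabled: %d, timings[0]=0x%08x\n", enable_mode(2), timings[0]);
    printf("mode 5 enabled: %d, timings[0]=0x%08x\n", enable_mode(5), timings[0]);
    return 0;
}
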
*/ - if (drive->select.b.unit & 0x01) - timings = &pmif->timings[1]; - else - timings = &pmif->timings[0]; + /* Apply timings to controller */ + *timings = timing_local[0]; + *timings2 = timing_local[1]; - set_timings_udma(timings, feature); + /* Set speed info in drive */ + drive->current_speed = mode; + if (!drive->init_speed) + drive->init_speed = mode; - drive->current_speed = feature; return 1; } +static __pmac int pmac_ide_dma_check(ide_drive_t *drive) { - int ata4, udma; struct hd_driveid *id = drive->id; ide_hwif_t *hwif = HWIF(drive); pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; int enable = 1; - + int map; drive->using_dma = 0; if (pmif == NULL) @@ -1218,29 +1507,32 @@ enable = 0; if (HWIF(drive)->ide_dma_bad_drive(drive)) enable = 0; - udma = 0; - ata4 = (pmif->kind == controller_kl_ata4 || - pmif->kind == controller_kl_ata4_80); - - if(enable) { - if (ata4 && (drive->media == ide_disk) && - (id->field_valid & 0x0004) && (id->dma_ultra & 0x1f)) { - /* UltraDMA modes. */ - drive->using_dma = pmac_ide_udma_enable(drive, - pmif->kind == controller_kl_ata4_80); - } - if (!drive->using_dma && (id->dma_mword & 0x0007)) { - /* Normal MultiWord DMA modes. */ - drive->using_dma = pmac_ide_mdma_enable(drive); + + if (enable) { + short mode; + + map = XFER_MWDMA; + if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6) { + map |= XFER_UDMA; + if (pmif->cable_80) { + map |= XFER_UDMA_66; + if (pmif->kind == controller_un_ata6) + map |= XFER_UDMA_100; + } } + mode = ide_find_best_mode(drive, map); + if (mode & XFER_UDMA) + drive->using_dma = pmac_ide_udma_enable(drive, mode); + else if (mode & XFER_MWDMA) + drive->using_dma = pmac_ide_mdma_enable(drive, mode); hwif->OUTB(0, IDE_CONTROL_REG); /* Apply settings to controller */ - pmac_ide_selectproc(drive); + pmac_ide_do_update_timings(drive); } return 0; } -static int +static int __pmac pmac_ide_dma_read (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); @@ -1255,23 +1547,26 @@ if (pmif == NULL) return 1; - ata4 = (pmif->kind == controller_kl_ata4 || - pmif->kind == controller_kl_ata4_80); + ata4 = (pmif->kind == controller_kl_ata4); - if (!pmac_ide_build_dmatable(drive, 0)) + if (!pmac_ide_build_dmatable(drive, rq, PCI_DMA_FROMDEVICE)) + /* try PIO instead of DMA */ return 1; + /* Apple adds 60ns to wrDataSetup on reads */ if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) { writel(pmif->timings[unit]+0x00800000UL, (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG)); (void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG)); } - drive->waiting_for_dma = 1; + + drive->waiting_for_dma = 1; if (drive->media != ide_disk) return 0; + /* paranoia check */ if (HWGROUP(drive)->handler != NULL) /* paranoia check */ BUG(); - ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL); + ide_set_handler(drive, &ide_dma_intr, 2*WAIT_CMD, NULL); /* * FIX ME to use only ACB ide_task_t args Struct */ @@ -1293,7 +1588,7 @@ return pmac_ide_dma_begin(drive); } -static int +static int __pmac pmac_ide_dma_write (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); @@ -1308,23 +1603,27 @@ if (pmif == NULL) return 1; - ata4 = (pmif->kind == controller_kl_ata4 || - pmif->kind == controller_kl_ata4_80); + ata4 = (pmif->kind == controller_kl_ata4); - if (!pmac_ide_build_dmatable(drive, 1)) + if (!pmac_ide_build_dmatable(drive, rq, PCI_DMA_TODEVICE)) + /* try PIO instead of DMA */ return 1; + /* Apple adds 60ns to wrDataSetup on reads */ if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) { writel(pmif->timings[unit], (unsigned 
*)(IDE_DATA_REG+IDE_TIMING_CONFIG)); (void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG)); } + drive->waiting_for_dma = 1; if (drive->media != ide_disk) return 0; + + /* paranoia check */ if (HWGROUP(drive)->handler != NULL) /* paranoia check */ BUG(); - ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL); + ide_set_handler(drive, &ide_dma_intr, 2*WAIT_CMD, NULL); /* * FIX ME to use only ACB ide_task_t args Struct */ @@ -1346,13 +1645,13 @@ return pmac_ide_dma_begin(drive); } -static int +static int __pmac pmac_ide_dma_count (ide_drive_t *drive) { return HWIF(drive)->ide_dma_begin(drive); } -static int +static int __pmac pmac_ide_dma_begin (ide_drive_t *drive) { pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; @@ -1368,7 +1667,7 @@ return 0; } -static int +static int __pmac pmac_ide_dma_end (ide_drive_t *drive) { pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; @@ -1387,7 +1686,7 @@ return (dstat & (RUN|DEAD|ACTIVE)) != RUN; } -static int +static int __pmac pmac_ide_dma_test_irq (ide_drive_t *drive) { pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; @@ -1427,33 +1726,33 @@ return 1; if (!drive->waiting_for_dma) printk(KERN_WARNING "ide%d, ide_dma_test_irq \ - called while not waiting\n", pmif->index); + called while not waiting\n", HWIF(drive)->index); /* If dbdma didn't execute the STOP command yet, the * active bit is still set */ drive->waiting_for_dma++; if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { printk(KERN_WARNING "ide%d, timeout waiting \ - for dbdma command stop\n", pmif->index); + for dbdma command stop\n", HWIF(drive)->index); return 1; } udelay(1); return 0; } -static int +static int __pmac pmac_ide_dma_host_off (ide_drive_t *drive) { return 0; } -static int +static int __pmac pmac_ide_dma_host_on (ide_drive_t *drive) { return 0; } -static int +static int __pmac pmac_ide_dma_lostirq (ide_drive_t *drive) { pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; @@ -1474,9 +1773,13 @@ { struct pmac_ide_hwif *pmif = &pmac_ide[ix]; - if (request_OF_resource(np, 1, " (mac-io IDE DMA)") == NULL) { - printk(KERN_ERR "ide-pmac(%s): can't request DMA resource !\n", np->name); - return; + if (device_is_compatible(np, "kauai-ata")) { + pmif->dma_regs = (volatile struct dbdma_regs*)(pmif->mapbase + 0x1000); + } else { + if (request_OF_resource(np, 1, " (mac-io IDE DMA)") == NULL) { + printk(KERN_ERR "ide-pmac(%s): can't request DMA resource !\n", np->name); + return; + } } pmif->dma_regs = @@ -1535,7 +1838,7 @@ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ static void __pmac -idepmac_sleep_device(ide_drive_t *drive, unsigned base) +idepmac_sleep_device(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); int j; @@ -1546,19 +1849,21 @@ switch (drive->media) { case ide_disk: /* Spin down the drive */ - hwif->OUTB(drive->select.all, base+0x60); - (void) hwif->INB(base+0x60); + SELECT_DRIVE(drive); + SELECT_MASK(drive, 0); + hwif->OUTB(drive->select.all, IDE_SELECT_REG); + (void) hwif->INB(IDE_SELECT_REG); udelay(100); - hwif->OUTB(0x0, base+0x30); - hwif->OUTB(0x0, base+0x20); - hwif->OUTB(0x0, base+0x40); - hwif->OUTB(0x0, base+0x50); - hwif->OUTB(0xe0, base+0x70); - hwif->OUTB(0x2, base+0x160); + hwif->OUTB(0x00, IDE_SECTOR_REG); + hwif->OUTB(0x00, IDE_NSECTOR_REG); + hwif->OUTB(0x00, IDE_LCYL_REG); + hwif->OUTB(0x00, IDE_HCYL_REG); + hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG); + hwif->OUTB(WIN_STANDBYNOW1, IDE_COMMAND_REG); for (j = 0; j < 10; j++) { u8 status; mdelay(100); - status = hwif->INB(base+0x70); + status = 
hwif->INB(IDE_STATUS_REG); if (!(status & BUSY_STAT) && (status & DRQ_STAT)) break; } @@ -1637,8 +1942,8 @@ } } -static void -idepmac_sleep_drive(ide_drive_t *drive, unsigned long base) +static void __pmac +idepmac_sleep_drive(ide_drive_t *drive) { int unlock = 0; @@ -1651,13 +1956,13 @@ /* Lock HW group */ HWGROUP(drive)->busy = 1; /* Stop the device */ - idepmac_sleep_device(drive, base); + idepmac_sleep_device(drive); } if (unlock) spin_unlock_irq(&io_request_lock); } -static void +static void __pmac idepmac_wake_drive(ide_drive_t *drive, unsigned long base) { ide_hwif_t *hwif = HWIF(drive); @@ -1665,7 +1970,7 @@ int j; /* Reset timings */ - pmac_ide_selectproc(drive); + pmac_ide_do_update_timings(drive); mdelay(10); /* Wait up to 20 seconds for the drive to be ready */ @@ -1716,7 +2021,7 @@ for (dn=0; dndrives[dn].present) continue; - idepmac_sleep_drive(&hwif->drives[dn], base); + idepmac_sleep_drive(&hwif->drives[dn]); } /* Disable irq during sleep */ disable_irq(pmac_ide[i].irq); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/raid/pdcraid.c linux.21pre4-ac2/drivers/ide/raid/pdcraid.c --- linux.21pre4/drivers/ide/raid/pdcraid.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/raid/pdcraid.c 2003-01-09 19:43:30.000000000 +0000 @@ -159,48 +159,11 @@ } -unsigned long partition_map_normal(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) +static unsigned long partition_map_normal(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) { return block + partition_off; } -unsigned long partition_map_linux(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) -{ - unsigned long newblock; - - newblock = stride - (partition_off%stride); if (newblock == stride) newblock = 0; - newblock += block; - newblock = newblock % partition_size; - newblock += partition_off; - - return newblock; -} - -static int funky_remap[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; - -unsigned long partition_map_linux_raid0_4disk(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) -{ - unsigned long newblock,temp,temp2; - - newblock = stride - (partition_off%stride); if (newblock == stride) newblock = 0; - - if (block < (partition_size / (8*stride))*8*stride ) { - temp = block % stride; - temp2 = block / stride; - temp2 = ((temp2>>3)<<3)|(funky_remap[temp2&7]); - block = temp2*stride+temp; - } - - - newblock += block; - newblock = newblock % partition_size; - newblock += partition_off; - - return newblock; -} - - - static int pdcraid0_make_request (request_queue_t *q, int rw, struct buffer_head * bh) { unsigned long rsect; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ide/raid/silraid.c linux.21pre4-ac2/drivers/ide/raid/silraid.c --- linux.21pre4/drivers/ide/raid/silraid.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ide/raid/silraid.c 2003-01-09 19:43:30.000000000 +0000 @@ -157,7 +157,7 @@ } -unsigned long partition_map_normal(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) +static unsigned long partition_map_normal(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) { return block + partition_off; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ieee1394/pcilynx.c linux.21pre4-ac2/drivers/ieee1394/pcilynx.c --- 
linux.21pre4/drivers/ieee1394/pcilynx.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ieee1394/pcilynx.c 2003-01-06 15:38:25.000000000 +0000 @@ -891,21 +891,13 @@ ssize_t retval; void *membase; - if (*offset != off) /* Check for EOF before we trust wrap */ - return 0; - - /* FIXME: Signed wrap is undefined in C - wants fixing up */ - if (off + count > off) - return 0; - - if ((off + count) > PCILYNX_MAX_MEMORY + 1) { - count = PCILYNX_MAX_MEMORY + 1 - off; + if ((off + count) > PCILYNX_MAX_MEMORY+1) { + count = PCILYNX_MAX_MEMORY+1 - off; } - if (count == 0) { - return 0; + if (count == 0 || off > PCILYNX_MAX_MEMORY) { + return -ENOSPC; } - switch (md->type) { case rom: membase = md->lynx->local_rom; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/ieee1394/sbp2.c linux.21pre4-ac2/drivers/ieee1394/sbp2.c --- linux.21pre4/drivers/ieee1394/sbp2.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/ieee1394/sbp2.c 2003-01-09 20:16:57.000000000 +0000 @@ -1511,7 +1511,7 @@ * physical dma in hardware). Mostly just here for debugging... */ static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data, - u64 addr, unsigned int length) + u64 addr, unsigned int length, u16 flags) { /* @@ -1527,7 +1527,7 @@ * physical dma in hardware). Mostly just here for debugging... */ static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data, - u64 addr, unsigned int length) + u64 addr, unsigned int length, u16 flags) { /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/input/keybdev.c linux.21pre4-ac2/drivers/input/keybdev.c --- linux.21pre4/drivers/input/keybdev.c 2003-01-29 16:27:04.000000000 +0000 +++ linux.21pre4-ac2/drivers/input/keybdev.c 2003-01-06 15:38:26.000000000 +0000 @@ -201,6 +201,7 @@ input_open_device(handle); // printk(KERN_INFO "keybdev.c: Adding keyboard: input%d\n", dev->number); + kbd_refresh_leds(); return handle; } @@ -222,6 +223,7 @@ { input_register_handler(&keybdev_handler); kbd_ledfunc = keybdev_ledfunc; + kbd_refresh_leds(); if (jp_kbd_109) { x86_keycodes[0xb5] = 0x73; /* backslash, underscore */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/isdn/hisax/st5481.h linux.21pre4-ac2/drivers/isdn/hisax/st5481.h --- linux.21pre4/drivers/isdn/hisax/st5481.h 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/isdn/hisax/st5481.h 2003-02-04 15:54:22.000000000 +0000 @@ -218,6 +218,19 @@ #define L1_EVENT_COUNT (EV_TIMER3 + 1) +#if (__GNUC__ > 2) + +#define ERR(format, arg...) \ +printk(KERN_ERR __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) + +#define WARN(format, arg...) \ +printk(KERN_WARNING __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) + +#define INFO(format, arg...) \ +printk(KERN_INFO __FILE__ ": %s: " format "\n" , __FUNCTION__ , ## arg) + +#else + #define ERR(format, arg...) \ printk(KERN_ERR __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) @@ -227,6 +240,8 @@ #define INFO(format, arg...) 
\ printk(KERN_INFO __FILE__ ": " __FUNCTION__ ": " format "\n" , ## arg) +#endif + #include "isdnhdlc.h" #include "fsm.h" #include "hisax_if.h" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/Config.in linux.21pre4-ac2/drivers/md/Config.in --- linux.21pre4/drivers/md/Config.in 2003-01-29 16:27:04.000000000 +0000 +++ linux.21pre4-ac2/drivers/md/Config.in 2003-01-06 17:11:06.000000000 +0000 @@ -14,5 +14,8 @@ dep_tristate ' Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_tristate ' Device-mapper support (EXPERIMENTAL)' CONFIG_BLK_DEV_DM $CONFIG_MD +fi endmenu diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm.c linux.21pre4-ac2/drivers/md/dm.c --- linux.21pre4/drivers/md/dm.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,1158 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include + +/* we only need this for the lv_bmap struct definition, not happy */ +#include + +#define DEFAULT_READ_AHEAD 64 + +static const char *_name = DM_NAME; + +static int major = 0; +static int _major = 0; + +struct io_hook { + struct mapped_device *md; + struct target *target; + int rw; + + void (*end_io) (struct buffer_head * bh, int uptodate); + void *context; +}; + +static kmem_cache_t *_io_hook_cache; + +static struct mapped_device *_devs[MAX_DEVICES]; +static struct rw_semaphore _dev_locks[MAX_DEVICES]; + +/* + * This lock is only held by dm_create and dm_set_name to avoid + * race conditions where someone else may create a device with + * the same name. + */ +static spinlock_t _create_lock = SPIN_LOCK_UNLOCKED; + +/* block device arrays */ +static int _block_size[MAX_DEVICES]; +static int _blksize_size[MAX_DEVICES]; +static int _hardsect_size[MAX_DEVICES]; + +static devfs_handle_t _dev_dir; + +static int request(request_queue_t * q, int rw, struct buffer_head *bh); +static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb); + +/* + * Protect the mapped_devices referenced from _dev[] + */ +struct mapped_device *dm_get_r(int minor) +{ + struct mapped_device *md; + + if (minor >= MAX_DEVICES) + return NULL; + + down_read(_dev_locks + minor); + md = _devs[minor]; + if (!md) + up_read(_dev_locks + minor); + return md; +} + +struct mapped_device *dm_get_w(int minor) +{ + struct mapped_device *md; + + if (minor >= MAX_DEVICES) + return NULL; + + down_write(_dev_locks + minor); + md = _devs[minor]; + if (!md) + up_write(_dev_locks + minor); + return md; +} + +static int namecmp(struct mapped_device *md, const char *name, int nametype) +{ + switch (nametype) { + case DM_LOOKUP_BY_NAME: + return strcmp(md->name, name); + break; + + case DM_LOOKUP_BY_UUID: + if (!md->uuid) + return -1; /* never equal */ + + return strcmp(md->uuid, name); + break; + + default: + DMWARN("Unknown comparison type in namecmp: %d", nametype); + BUG(); + } + + return -1; +} + +/* + * The interface (eg, ioctl) will probably access the devices + * through these slow 'by name' locks, this needs improving at + * some point if people start playing with *large* numbers of dm + * devices. 
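
dm_get_r and dm_get_w above return with the per-minor rw-semaphore held for read or write (and drop it again when the slot is empty), and the "by name" lookups this comment introduces scan every minor; the write variant, dm_get_name_w just below, finds the device under a read lock, re-takes the lock for writing and revalidates the name, restarting if the device changed in between. A rough user-space equivalent of that promote-and-revalidate loop using POSIX rwlocks; the dev/devs/get_by_name_w names are illustrative only.

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define MAX_DEVS 4

struct dev { char name[16]; int present; };

static struct dev devs[MAX_DEVS] = { { "vol0", 1 }, { "vol1", 1 } };
static pthread_rwlock_t locks[MAX_DEVS] = {
    PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
    PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
};

/* Return the slot of 'name' with its write lock held, or -1 if not found. */
static int get_by_name_w(const char *name)
{
restart:
    for (int i = 0; i < MAX_DEVS; i++) {
        pthread_rwlock_rdlock(&locks[i]);
        int match = devs[i].present && strcmp(devs[i].name, name) == 0;
        pthread_rwlock_unlock(&locks[i]);
        if (!match)
            continue;

        /* Promote: re-acquire for writing, then revalidate. */
        pthread_rwlock_wrlock(&locks[i]);
        if (devs[i].present && strcmp(devs[i].name, name) == 0)
            return i;                /* still the right device */
        pthread_rwlock_unlock(&locks[i]);
        goto restart;                /* it changed under us: start over */
    }
    return -1;
}

int main(void)
{
    int i = get_by_name_w("vol1");
    if (i >= 0) {
        printf("found %s at slot %d\n", devs[i].name, i);
        pthread_rwlock_unlock(&locks[i]);
    }
    return 0;
}
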
+ */ +struct mapped_device *dm_get_name_r(const char *name, int nametype) +{ + int i; + struct mapped_device *md; + + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_r(i); + if (md) { + if (!namecmp(md, name, nametype)) + return md; + + dm_put_r(md); + } + } + + return NULL; +} + +struct mapped_device *dm_get_name_w(const char *name, int nametype) +{ + int i; + struct mapped_device *md; + + /* + * To avoid getting write locks on all the devices we try + * and promote a read lock to a write lock, this can + * fail, in which case we just start again. + */ + + restart: + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_r(i); + if (!md) + continue; + + if (namecmp(md, name, nametype)) { + dm_put_r(md); + continue; + } + + /* found it */ + dm_put_r(md); + + md = dm_get_w(i); + if (!md) + goto restart; + + if (namecmp(md, name, nametype)) { + dm_put_w(md); + goto restart; + } + + return md; + } + + return NULL; +} + +void dm_put_r(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + if (minor >= MAX_DEVICES) + return; + + up_read(_dev_locks + minor); +} + +void dm_put_w(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + if (minor >= MAX_DEVICES) + return; + + up_write(_dev_locks + minor); +} + +/* + * Setup and tear down the driver + */ +static __init void init_locks(void) +{ + int i; + + for (i = 0; i < MAX_DEVICES; i++) + init_rwsem(_dev_locks + i); +} + +static __init int local_init(void) +{ + int r; + + init_locks(); + + /* allocate a slab for the io-hooks */ + if (!_io_hook_cache && + !(_io_hook_cache = kmem_cache_create("dm io hooks", + sizeof(struct io_hook), + 0, 0, NULL, NULL))) + return -ENOMEM; + + _major = major; + r = devfs_register_blkdev(_major, _name, &dm_blk_dops); + if (r < 0) { + DMERR("register_blkdev failed"); + kmem_cache_destroy(_io_hook_cache); + return r; + } + + if (!_major) + _major = r; + + /* set up the arrays */ + read_ahead[_major] = DEFAULT_READ_AHEAD; + blk_size[_major] = _block_size; + blksize_size[_major] = _blksize_size; + hardsect_size[_major] = _hardsect_size; + + blk_queue_make_request(BLK_DEFAULT_QUEUE(_major), request); + + _dev_dir = devfs_mk_dir(0, DM_DIR, NULL); + + return 0; +} + +static void local_exit(void) +{ + if (kmem_cache_destroy(_io_hook_cache)) + DMWARN("io_hooks still allocated during unregistration"); + _io_hook_cache = NULL; + + if (devfs_unregister_blkdev(_major, _name) < 0) + DMERR("devfs_unregister_blkdev failed"); + + read_ahead[_major] = 0; + blk_size[_major] = NULL; + blksize_size[_major] = NULL; + hardsect_size[_major] = NULL; + _major = 0; + + DMINFO("cleaned up"); +} + +/* + * We have a lot of init/exit functions, so it seems easier to + * store them in an array. The disposable macro 'xx' + * expands a prefix into a pair of function names. 
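
The _inits[] table that follows is built with the disposable xx() macro described just above: each entry pairs an _init with an _exit function, dm_init() runs the inits in order and unwinds the ones that already succeeded if a later one fails, and dm_exit() runs the exits in reverse. A minimal sketch of the same registration-with-rollback pattern, with dummy subsystems invented for illustration.

#include <stdio.h>

static int  a_init(void) { puts("a up");   return 0; }
static void a_exit(void) { puts("a down"); }
static int  b_init(void) { puts("b up");   return -1; }   /* pretend failure */
static void b_exit(void) { puts("b down"); }

static struct {
    int  (*init)(void);
    void (*exit)(void);
} inits[] = {
#define xx(n) { n##_init, n##_exit },
    xx(a)
    xx(b)
#undef xx
};

static int setup(void)
{
    const int count = sizeof(inits) / sizeof(*inits);
    int i, r;

    for (i = 0; i < count; i++) {
        r = inits[i].init();
        if (r) {
            while (i--)              /* unwind whatever already succeeded */
                inits[i].exit();
            return r;
        }
    }
    return 0;
}

int main(void)
{
    printf("setup: %d\n", setup());   /* b fails, so a is torn down again */
    return 0;
}
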
+ */ +static struct { + int (*init)(void); + void (*exit)(void); + +} _inits[] = { +#define xx(n) {n ## _init, n ## _exit}, + xx(local) + xx(dm_target) + xx(dm_linear) + xx(dm_stripe) + xx(dm_snapshot) + xx(dm_interface) +#undef xx +}; + +static int __init dm_init(void) +{ + const int count = sizeof(_inits) / sizeof(*_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i].init(); + if (r) + goto bad; + } + + return 0; + + bad: + while (i--) + _inits[i].exit(); + + return r; +} + +static void __exit dm_exit(void) +{ + int i = sizeof(_inits) / sizeof(*_inits); + + dm_destroy_all(); + while (i--) + _inits[i].exit(); +} + +/* + * Block device functions + */ +static int dm_blk_open(struct inode *inode, struct file *file) +{ + struct mapped_device *md; + + md = dm_get_w(MINOR(inode->i_rdev)); + if (!md) + return -ENXIO; + + md->use_count++; + dm_put_w(md); + + return 0; +} + +static int dm_blk_close(struct inode *inode, struct file *file) +{ + struct mapped_device *md; + + md = dm_get_w(MINOR(inode->i_rdev)); + if (!md) + return -ENXIO; + + if (md->use_count < 1) + DMWARN("incorrect reference count found in mapped_device"); + + md->use_count--; + dm_put_w(md); + + return 0; +} + +/* In 512-byte units */ +#define VOLUME_SIZE(minor) (_block_size[(minor)] << 1) + +static int dm_blk_ioctl(struct inode *inode, struct file *file, + uint command, unsigned long a) +{ + int minor = MINOR(inode->i_rdev); + long size; + + if (minor >= MAX_DEVICES) + return -ENXIO; + + switch (command) { + case BLKROSET: + case BLKROGET: + case BLKRASET: + case BLKRAGET: + case BLKFLSBUF: + case BLKSSZGET: + //case BLKRRPART: /* Re-read partition tables */ + //case BLKPG: + case BLKELVGET: + case BLKELVSET: + case BLKBSZGET: + case BLKBSZSET: + return blk_ioctl(inode->i_rdev, command, a); + break; + + case BLKGETSIZE: + size = VOLUME_SIZE(minor); + if (copy_to_user((void *) a, &size, sizeof(long))) + return -EFAULT; + break; + + case BLKGETSIZE64: + size = VOLUME_SIZE(minor); + if (put_user((u64) ((u64) size) << 9, (u64 *) a)) + return -EFAULT; + break; + + case BLKRRPART: + return -ENOTTY; + + case LV_BMAP: + return dm_user_bmap(inode, (struct lv_bmap *) a); + + default: + DMWARN("unknown block ioctl 0x%x", command); + return -ENOTTY; + } + + return 0; +} + +static inline struct io_hook *alloc_io_hook(void) +{ + return kmem_cache_alloc(_io_hook_cache, GFP_NOIO); +} + +static inline void free_io_hook(struct io_hook *ih) +{ + kmem_cache_free(_io_hook_cache, ih); +} + +/* + * FIXME: We need to decide if deferred_io's need + * their own slab, I say no for now since they are + * only used when the device is suspended. + */ +static inline struct deferred_io *alloc_deferred(void) +{ + return kmalloc(sizeof(struct deferred_io), GFP_NOIO); +} + +static inline void free_deferred(struct deferred_io *di) +{ + kfree(di); +} + +/* + * Call a target's optional error function if an I/O failed. + */ +static inline int call_err_fn(struct io_hook *ih, struct buffer_head *bh) +{ + dm_err_fn err = ih->target->type->err; + + if (err) + return err(bh, ih->rw, ih->target->private); + + return 0; +} + +/* + * bh->b_end_io routine that decrements the pending count + * and then calls the original bh->b_end_io fn. 
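
dec_pending, which follows, is the other half of the io_hook mechanism defined near the top of dm.c: __map_buffer saves the buffer head's original b_end_io and b_private in an io_hook and substitutes its own completion handler, and dec_pending restores them and chains to the original once the mapped I/O finishes, dropping the per-device pending count so a suspend waiter can be woken. A compact user-space sketch of that callback-hooking pattern; the io/hook types and names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct io {
    void (*done)(struct io *io, int ok);    /* completion callback */
    void *priv;                             /* callback's private data */
};

struct hook {
    void (*orig_done)(struct io *io, int ok);
    void *orig_priv;
    int  *pending;                          /* count of in-flight hooked ios */
};

static void hooked_done(struct io *io, int ok)
{
    struct hook *h = io->priv;

    if (--(*h->pending) == 0)
        puts("all hooked io complete");     /* e.g. wake a suspend waiter */

    io->done = h->orig_done;                /* restore the original callback */
    io->priv = h->orig_priv;
    free(h);
    io->done(io, ok);                       /* chain to it */
}

static void hook_io(struct io *io, int *pending)
{
    struct hook *h = malloc(sizeof(*h));

    if (!h)
        return;                             /* sketch only: keep the io unhooked */
    h->orig_done = io->done;
    h->orig_priv = io->priv;
    h->pending   = pending;
    (*pending)++;

    io->done = hooked_done;
    io->priv = h;
}

static void user_done(struct io *io, int ok)
{
    printf("original callback: ok=%d priv=%s\n", ok, (char *) io->priv);
}

int main(void)
{
    int pending = 0;
    struct io io = { user_done, "hello" };

    hook_io(&io, &pending);
    io.done(&io, 1);                        /* pretend the request completed */
    return 0;
}
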
+ */ +static void dec_pending(struct buffer_head *bh, int uptodate) +{ + struct io_hook *ih = bh->b_private; + + if (!uptodate && call_err_fn(ih, bh)) + return; + + if (atomic_dec_and_test(&ih->md->pending)) + /* nudge anyone waiting on suspend queue */ + wake_up(&ih->md->wait); + + bh->b_end_io = ih->end_io; + bh->b_private = ih->context; + free_io_hook(ih); + + bh->b_end_io(bh, uptodate); +} + +/* + * Add the bh to the list of deferred io. + */ +static int queue_io(struct buffer_head *bh, int rw) +{ + struct deferred_io *di = alloc_deferred(); + struct mapped_device *md; + + if (!di) + return -ENOMEM; + + md = dm_get_w(MINOR(bh->b_rdev)); + if (!md) { + free_deferred(di); + return -ENXIO; + } + + if (!md->suspended) { + dm_put_w(md); + free_deferred(di); + return 1; + } + + di->bh = bh; + di->rw = rw; + di->next = md->deferred; + md->deferred = di; + + dm_put_w(md); + + return 0; /* deferred successfully */ +} + +/* + * Do the bh mapping for a given leaf + */ +static inline int __map_buffer(struct mapped_device *md, + struct buffer_head *bh, int rw, int leaf) +{ + int r; + dm_map_fn fn; + void *context; + struct io_hook *ih = NULL; + struct target *ti = md->map->targets + leaf; + + fn = ti->type->map; + context = ti->private; + + ih = alloc_io_hook(); + + if (!ih) + return -1; + + ih->md = md; + ih->rw = rw; + ih->target = ti; + ih->end_io = bh->b_end_io; + ih->context = bh->b_private; + + r = fn(bh, rw, context); + + if (r > 0) { + /* hook the end io request fn */ + atomic_inc(&md->pending); + bh->b_end_io = dec_pending; + bh->b_private = ih; + + } else if (r == 0) + /* we don't need to hook */ + free_io_hook(ih); + + else if (r < 0) { + free_io_hook(ih); + return -1; + } + + return r; +} + +/* + * Search the btree for the correct target. + */ +static inline int __find_node(struct dm_table *t, struct buffer_head *bh) +{ + int l, n = 0, k = 0; + offset_t *node; + + for (l = 0; l < t->depth; l++) { + n = get_child(n, k); + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + if (node[k] >= bh->b_rsector) + break; + } + + return (KEYS_PER_NODE * n) + k; +} + +static int request(request_queue_t * q, int rw, struct buffer_head *bh) +{ + struct mapped_device *md; + int r, minor = MINOR(bh->b_rdev); + unsigned int block_size = _blksize_size[minor]; + + md = dm_get_r(minor); + if (!md) { + buffer_IO_error(bh); + return 0; + } + + /* + * Sanity checks. + */ + if (bh->b_size > block_size) + DMERR("request is larger than block size " + "b_size (%d), block size (%d)", + bh->b_size, block_size); + + if (bh->b_rsector & ((bh->b_size >> 9) - 1)) + DMERR("misaligned block requested logical " + "sector (%lu), b_size (%d)", + bh->b_rsector, bh->b_size); + + /* + * If we're suspended we have to queue + * this io for later. + */ + while (md->suspended) { + dm_put_r(md); + + if (rw == READA) + goto bad_no_lock; + + r = queue_io(bh, rw); + + if (r < 0) + goto bad_no_lock; + + else if (r == 0) + return 0; /* deferred successfully */ + + /* + * We're in a while loop, because someone could suspend + * before we get to the following read lock. 
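
The while (md->suspended) loop in request() above defers the buffer head through queue_io() while the device is suspended, and it is a loop rather than an if for exactly the race spelled out here: the device can be suspended again between queue_io() reporting "not suspended any more" and the point where the request would be mapped. A simplified single-threaded sketch of defer-or-map under a suspended flag; the return conventions, names and (absent) locking are all simplified for illustration.

#include <stdio.h>

#define MAX_DEFERRED 8

struct dev {
    int suspended;
    int deferred[MAX_DEFERRED];       /* no overflow handling in this sketch */
    int ndeferred;
};

/* Returns 1 if the request was queued for later, 0 if the device is no
 * longer suspended and the caller should map it directly. */
static int queue_io(struct dev *d, int req)
{
    if (!d->suspended)
        return 0;                     /* raced with resume: caller retries */
    d->deferred[d->ndeferred++] = req;
    return 1;
}

static void map_now(struct dev *d, int req)
{
    (void) d;
    printf("mapping request %d\n", req);
}

static void request(struct dev *d, int req)
{
    /* Loop, because the device could be suspended again between a failed
     * queue_io() and the point where we would otherwise map the request. */
    while (d->suspended) {
        if (queue_io(d, req))
            return;                   /* deferred; resume will replay it */
    }
    map_now(d, req);
}

static void resume(struct dev *d)
{
    d->suspended = 0;
    for (int i = 0; i < d->ndeferred; i++)
        map_now(d, d->deferred[i]);   /* replay the deferred requests */
    d->ndeferred = 0;
}

int main(void)
{
    struct dev d = { .suspended = 1 };

    request(&d, 1);                   /* deferred */
    request(&d, 2);                   /* deferred */
    resume(&d);                       /* replays 1 and 2 */
    request(&d, 3);                   /* mapped immediately */
    return 0;
}
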
+ */ + md = dm_get_r(minor); + if (!md) { + buffer_IO_error(bh); + return 0; + } + } + + if ((r = __map_buffer(md, bh, rw, __find_node(md->map, bh))) < 0) + goto bad; + + dm_put_r(md); + return r; + + bad: + dm_put_r(md); + + bad_no_lock: + buffer_IO_error(bh); + return 0; +} + +static int check_dev_size(int minor, unsigned long block) +{ + /* FIXME: check this */ + unsigned long max_sector = (_block_size[minor] << 1) + 1; + unsigned long sector = (block + 1) * (_blksize_size[minor] >> 9); + + return (sector > max_sector) ? 0 : 1; +} + +/* + * Creates a dummy buffer head and maps it (for lilo). + */ +static int do_bmap(kdev_t dev, unsigned long block, + kdev_t * r_dev, unsigned long *r_block) +{ + struct mapped_device *md; + struct buffer_head bh; + int minor = MINOR(dev), r; + struct target *t; + + md = dm_get_r(minor); + if (!md) + return -ENXIO; + + if (md->suspended) { + dm_put_r(md); + return -EPERM; + } + + if (!check_dev_size(minor, block)) { + dm_put_r(md); + return -EINVAL; + } + + /* setup dummy bh */ + memset(&bh, 0, sizeof(bh)); + bh.b_blocknr = block; + bh.b_dev = bh.b_rdev = dev; + bh.b_size = _blksize_size[minor]; + bh.b_rsector = block * (bh.b_size >> 9); + + /* find target */ + t = md->map->targets + __find_node(md->map, &bh); + + /* do the mapping */ + r = t->type->map(&bh, READ, t->private); + + *r_dev = bh.b_rdev; + *r_block = bh.b_rsector / (bh.b_size >> 9); + + dm_put_r(md); + return r; +} + +/* + * Marshals arguments and results between user and kernel space. + */ +static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb) +{ + unsigned long block, r_block; + kdev_t r_dev; + int r; + + if (get_user(block, &lvb->lv_block)) + return -EFAULT; + + if ((r = do_bmap(inode->i_rdev, block, &r_dev, &r_block))) + return r; + + if (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) || + put_user(r_block, &lvb->lv_block)) + return -EFAULT; + + return 0; +} + +/* + * See if the device with a specific minor # is free. The write + * lock is held when it returns successfully. + */ +static inline int specific_dev(int minor, struct mapped_device *md) +{ + if (minor >= MAX_DEVICES) { + DMWARN("request for a mapped_device beyond MAX_DEVICES (%d)", + MAX_DEVICES); + return -1; + } + + down_write(_dev_locks + minor); + if (_devs[minor]) { + /* in use */ + up_write(_dev_locks + minor); + return -1; + } + + return minor; +} + +/* + * Find the first free device. Again the write lock is held on + * success. + */ +static int any_old_dev(struct mapped_device *md) +{ + int i; + + for (i = 0; i < MAX_DEVICES; i++) + if (specific_dev(i, md) != -1) + return i; + + return -1; +} + +/* + * Allocate and initialise a blank device. + * Caller must ensure uuid is null-terminated. + * Device is returned with a write lock held. + */ +static struct mapped_device *alloc_dev(const char *name, const char *uuid, + int minor) +{ + struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); + int len; + + if (!md) { + DMWARN("unable to allocate device, out of memory."); + return NULL; + } + + memset(md, 0, sizeof(*md)); + + /* + * This grabs the write lock if it succeeds. + */ + minor = (minor < 0) ? any_old_dev(md) : specific_dev(minor, md); + if (minor < 0) { + kfree(md); + return NULL; + } + + md->dev = MKDEV(_major, minor); + md->suspended = 0; + + strncpy(md->name, name, sizeof(md->name) - 1); + md->name[sizeof(md->name) - 1] = '\0'; + + /* + * Copy in the uuid. 
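
do_bmap above answers the LV_BMAP ioctl (used by lilo) by building a throwaway buffer head for the requested block, pushing it through the same target map function the normal I/O path uses, and reading the remapped device and sector back out of it. A toy version of that trick with a single hypothetical linear target; the bh/linear_target types are stand-ins, not the kernel's.

#include <stdio.h>

/* Minimal stand-ins for a buffer head and a linear target. */
struct bh { int dev; unsigned long rsector; unsigned size; };

struct linear_target { int dev; unsigned long offset; };   /* offset in sectors */

/* The same map function the real I/O path would use. */
static int linear_map(struct bh *bh, void *context)
{
    struct linear_target *lt = context;

    bh->dev = lt->dev;
    bh->rsector += lt->offset;
    return 1;                                /* remapped, submit to lt->dev */
}

/* Translate a logical block to (device, block) by mapping a dummy bh. */
static void bmap(struct linear_target *t, unsigned blocksize,
                 unsigned long block, int *r_dev, unsigned long *r_block)
{
    struct bh bh = { 0, 0, 0 };

    bh.size = blocksize;
    bh.rsector = block * (blocksize >> 9);   /* block -> 512-byte sectors */

    linear_map(&bh, t);                      /* reuse the normal mapping path */

    *r_dev = bh.dev;
    *r_block = bh.rsector / (blocksize >> 9);
}

int main(void)
{
    struct linear_target t = { 42, 2048 };   /* maps onto dev 42, 1MiB in */
    int dev;
    unsigned long blk;

    bmap(&t, 4096, 10, &dev, &blk);
    printf("block 10 lives on dev %d, block %lu\n", dev, blk);
    return 0;
}
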
+ */ + if (uuid && *uuid) { + len = strlen(uuid) + 1; + if (!(md->uuid = kmalloc(len, GFP_KERNEL))) { + DMWARN("unable to allocate uuid - out of memory."); + kfree(md); + return NULL; + } + strcpy(md->uuid, uuid); + } + + init_waitqueue_head(&md->wait); + return md; +} + +static int __register_device(struct mapped_device *md) +{ + md->devfs_entry = + devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER, + MAJOR(md->dev), MINOR(md->dev), + S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP, + &dm_blk_dops, NULL); + + return 0; +} + +static int __unregister_device(struct mapped_device *md) +{ + devfs_unregister(md->devfs_entry); + return 0; +} + +/* + * The hardsect size for a mapped device is the largest hardsect size + * from the devices it maps onto. + */ +static int __find_hardsect_size(struct list_head *devices) +{ + int result = 512, size; + struct list_head *tmp; + + list_for_each(tmp, devices) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + size = get_hardsect_size(dd->dev); + if (size > result) + result = size; + } + + return result; +} + +/* + * Bind a table to the device. + */ +static int __bind(struct mapped_device *md, struct dm_table *t) +{ + int minor = MINOR(md->dev); + + md->map = t; + + if (!t->num_targets) { + _block_size[minor] = 0; + _blksize_size[minor] = BLOCK_SIZE; + _hardsect_size[minor] = 0; + return 0; + } + + /* in k */ + _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1; + + _blksize_size[minor] = BLOCK_SIZE; + _hardsect_size[minor] = __find_hardsect_size(&t->devices); + register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]); + + return 0; +} + +static void __unbind(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + + dm_table_destroy(md->map); + md->map = NULL; + + _block_size[minor] = 0; + _blksize_size[minor] = 0; + _hardsect_size[minor] = 0; +} + +static int check_name(const char *name) +{ + struct mapped_device *md; + + if (strchr(name, '/') || strlen(name) > DM_NAME_LEN) { + DMWARN("invalid device name"); + return -1; + } + + md = dm_get_name_r(name, DM_LOOKUP_BY_NAME); + if (md) { + dm_put_r(md); + DMWARN("device name already in use"); + return -1; + } + + return 0; +} + +static int check_uuid(const char *uuid) +{ + struct mapped_device *md; + + if (uuid) { + md = dm_get_name_r(uuid, DM_LOOKUP_BY_UUID); + if (md) { + dm_put_r(md); + DMWARN("device uuid already in use"); + return -1; + } + } + + return 0; +} + +/* + * Constructor for a new device. + */ +int dm_create(const char *name, const char *uuid, int minor, int ro, + struct dm_table *table) +{ + int r; + struct mapped_device *md; + + spin_lock(&_create_lock); + if (check_name(name) || check_uuid(uuid)) { + spin_unlock(&_create_lock); + return -EINVAL; + } + + md = alloc_dev(name, uuid, minor); + if (!md) { + spin_unlock(&_create_lock); + return -ENXIO; + } + minor = MINOR(md->dev); + _devs[minor] = md; + + r = __register_device(md); + if (r) + goto err; + + r = __bind(md, table); + if (r) + goto err; + + dm_set_ro(md, ro); + + spin_unlock(&_create_lock); + dm_put_w(md); + return 0; + + err: + _devs[minor] = NULL; + if (md->uuid) + kfree(md->uuid); + + dm_put_w(md); + kfree(md); + spin_unlock(&_create_lock); + return r; +} + +/* + * Renames the device. No lock held. 
+ */ +int dm_set_name(const char *name, int nametype, const char *newname) +{ + int r; + struct mapped_device *md; + + spin_lock(&_create_lock); + if (check_name(newname) < 0) { + spin_unlock(&_create_lock); + return -EINVAL; + } + + md = dm_get_name_w(name, nametype); + if (!md) { + spin_unlock(&_create_lock); + return -ENXIO; + } + + r = __unregister_device(md); + if (r) + goto out; + + strcpy(md->name, newname); + r = __register_device(md); + + out: + dm_put_w(md); + spin_unlock(&_create_lock); + return r; +} + +/* + * Destructor for the device. You cannot destroy an open + * device. Write lock must be held before calling. + * Caller must dm_put_w(md) then kfree(md) if call was successful. + */ +int dm_destroy(struct mapped_device *md) +{ + int minor, r; + + if (md->use_count) + return -EPERM; + + r = __unregister_device(md); + if (r) + return r; + + minor = MINOR(md->dev); + _devs[minor] = NULL; + __unbind(md); + + if (md->uuid) + kfree(md->uuid); + + return 0; +} + +/* + * Destroy all devices - except open ones + */ +void dm_destroy_all(void) +{ + int i, some_destroyed, r; + struct mapped_device *md; + + do { + some_destroyed = 0; + for (i = 0; i < MAX_DEVICES; i++) { + md = dm_get_w(i); + if (!md) + continue; + + r = dm_destroy(md); + dm_put_w(md); + + if (!r) { + kfree(md); + some_destroyed = 1; + } + } + } while (some_destroyed); +} + +/* + * Sets or clears the read-only flag for the device. Write lock + * must be held. + */ +void dm_set_ro(struct mapped_device *md, int ro) +{ + md->read_only = ro; + set_device_ro(md->dev, ro); +} + +/* + * Requeue the deferred buffer_heads by calling generic_make_request. + */ +static void flush_deferred_io(struct deferred_io *c) +{ + struct deferred_io *n; + + while (c) { + n = c->next; + generic_make_request(c->rw, c->bh); + free_deferred(c); + c = n; + } +} + +/* + * Swap in a new table (destroying old one). Write lock must be + * held. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *table) +{ + int r; + + /* device must be suspended */ + if (!md->suspended) + return -EPERM; + + __unbind(md); + + r = __bind(md, table); + if (r) + return r; + + return 0; +} + +/* + * We need to be able to change a mapping table under a mounted + * filesystem. for example we might want to move some data in + * the background. Before the table can be swapped with + * dm_bind_table, dm_suspend must be called to flush any in + * flight buffer_heads and ensure that any further io gets + * deferred. Write lock must be held. + */ +int dm_suspend(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + DECLARE_WAITQUEUE(wait, current); + + if (md->suspended) + return -EINVAL; + + md->suspended = 1; + dm_put_w(md); + + /* wait for all the pending io to flush */ + add_wait_queue(&md->wait, &wait); + current->state = TASK_UNINTERRUPTIBLE; + do { + md = dm_get_w(minor); + if (!md) { + /* Caller expects to free this lock. Yuck. 
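
dm_suspend above sets md->suspended so that request() starts deferring new I/O, then sleeps on md->wait until the atomic pending count of in-flight hooked buffer heads drops to zero; dm_resume (further down) clears the flag and replays the deferred list through flush_deferred_io and generic_make_request. A condensed pthread sketch of that drain-then-swap idea, with a condition variable standing in for the kernel wait queue; names and structure are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int pending;          /* in-flight I/Os */
static int suspended;        /* new I/O must be deferred while set */

static void io_complete(void)
{
    pthread_mutex_lock(&lock);
    if (--pending == 0)
        pthread_cond_signal(&drained);   /* nudge anyone waiting in suspend() */
    pthread_mutex_unlock(&lock);
}

static void *completer(void *arg)
{
    (void) arg;
    sleep(1);                /* pretend the hardware takes a while */
    io_complete();
    io_complete();
    return NULL;
}

static void suspend(void)
{
    pthread_mutex_lock(&lock);
    suspended = 1;           /* from now on, new I/O would be deferred */
    while (pending)          /* wait for everything in flight to finish */
        pthread_cond_wait(&drained, &lock);
    pthread_mutex_unlock(&lock);
}

static void resume(void)
{
    pthread_mutex_lock(&lock);
    suspended = 0;           /* a real implementation replays deferred I/O here */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;

    pending = 2;             /* two I/Os already in flight */
    pthread_create(&t, NULL, completer, NULL);

    suspend();               /* blocks until both complete */
    puts("drained: safe to swap the table");
    resume();

    pthread_join(t, NULL);
    return 0;
}
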
*/ + down_write(_dev_locks + minor); + return -ENXIO; + } + + if (!atomic_read(&md->pending)) + break; + + dm_put_w(md); + schedule(); + + } while (1); + + current->state = TASK_RUNNING; + remove_wait_queue(&md->wait, &wait); + + return 0; +} + +int dm_resume(struct mapped_device *md) +{ + int minor = MINOR(md->dev); + struct deferred_io *def; + + if (!md->suspended || !md->map->num_targets) + return -EINVAL; + + md->suspended = 0; + def = md->deferred; + md->deferred = NULL; + + dm_put_w(md); + flush_deferred_io(def); + run_task_queue(&tq_disk); + + if (!dm_get_w(minor)) { + /* FIXME: yuck */ + down_write(_dev_locks + minor); + return -ENXIO; + } + + return 0; +} + +struct block_device_operations dm_blk_dops = { + open: dm_blk_open, + release: dm_blk_close, + ioctl: dm_blk_ioctl, + owner: THIS_MODULE +}; + +/* + * module hooks + */ +module_init(dm_init); +module_exit(dm_exit); + +MODULE_PARM(major, "i"); +MODULE_PARM_DESC(major, "The major number of the device mapper"); +MODULE_DESCRIPTION(DM_NAME " driver"); +MODULE_AUTHOR("Joe Thornber "); +MODULE_LICENSE("GPL"); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-exception-store.c linux.21pre4-ac2/drivers/md/dm-exception-store.c --- linux.21pre4/drivers/md/dm-exception-store.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-exception-store.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,724 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm-snapshot.h" +#include "kcopyd.h" +#include +#include + +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 + +/*----------------------------------------------------------------- + * Persistent snapshots, by persistent we mean that the snapshot + * will survive a reboot. + *---------------------------------------------------------------*/ + +/* + * We need to store a record of which parts of the origin have + * been copied to the snapshot device. The snapshot code + * requires that we copy exception chunks to chunk aligned areas + * of the COW store. It makes sense therefore, to store the + * metadata in chunk size blocks. + * + * There is no backward or forward compatibility implemented, + * snapshots with different disk versions than the kernel will + * not be usable. It is expected that "lvcreate" will blank out + * the start of a fresh COW device before calling the snapshot + * constructor. + * + * The first chunk of the COW device just contains the header. + * After this there is a chunk filled with exception metadata, + * followed by as many exception chunks as can fit in the + * metadata areas. + * + * All on disk structures are in little-endian format. The end + * of the exceptions info is indicated by an exception with a + * new_chunk of 0, which is invalid since it would point to the + * header chunk. + */ + +/* + * Magic for persistent snapshots: "SnAp" - Feeble isn't it. + */ +#define SNAP_MAGIC 0x70416e53 + +/* + * The on-disk version of the metadata. + */ +#define SNAPSHOT_DISK_VERSION 1 + +struct disk_header { + uint32_t magic; + + /* + * Is this snapshot valid. There is no way of recovering + * an invalid snapshot. + */ + int valid; + + /* + * Simple, incrementing version. no backward + * compatibility. 
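
The layout described above puts the header in chunk 0 of the COW device and then repeats groups of one metadata chunk followed by its data chunks; all on-disk fields are little-endian, and a record whose new_chunk is 0 terminates the list because chunk 0 (the header) can never be a valid copy destination. A small sketch of the resulting chunk arithmetic, consistent with the area_io() helper further down, which computes chunk = 1 + (exceptions_per_area + 1) * area; the example figures are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

/* Chunk index of metadata area 'area': skip the header chunk, then whole
 * groups of (1 metadata chunk + exceptions_per_area data chunks). */
static uint32_t area_chunk(uint32_t area, uint32_t exceptions_per_area)
{
    return 1 + (exceptions_per_area + 1) * area;
}

int main(void)
{
    /* Example figures: 8KiB chunks (16 sectors of 512 bytes) hold
     * 16 * 512 / 16 = 512 sixteen-byte exception records each. */
    uint32_t chunk_size_sectors = 16;
    uint32_t exceptions_per_area =
        (uint32_t) ((chunk_size_sectors * 512) / (2 * sizeof(uint64_t)));

    for (uint32_t a = 0; a < 3; a++)
        printf("metadata area %u lives in chunk %u, data in chunks %u..%u\n",
               a, area_chunk(a, exceptions_per_area),
               area_chunk(a, exceptions_per_area) + 1,
               area_chunk(a, exceptions_per_area) + exceptions_per_area);
    return 0;
}
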
+ */ + uint32_t version; + + /* In sectors */ + uint32_t chunk_size; +}; + +struct disk_exception { + uint64_t old_chunk; + uint64_t new_chunk; +}; + +struct commit_callback { + void (*callback)(void *, int success); + void *context; +}; + +/* + * The top level structure for a persistent exception store. + */ +struct pstore { + struct dm_snapshot *snap; /* up pointer to my snapshot */ + int version; + int valid; + uint32_t chunk_size; + uint32_t exceptions_per_area; + + /* + * Now that we have an asynchronous kcopyd there is no + * need for large chunk sizes, so it wont hurt to have a + * whole chunks worth of metadata in memory at once. + */ + void *area; + struct kiobuf *iobuf; + + /* + * Used to keep track of which metadata area the data in + * 'chunk' refers to. + */ + uint32_t current_area; + + /* + * The next free chunk for an exception. + */ + uint32_t next_free; + + /* + * The index of next free exception in the current + * metadata area. + */ + uint32_t current_committed; + + atomic_t pending_count; + uint32_t callback_count; + struct commit_callback *callbacks; +}; + +/* + * For performance reasons we want to defer writing a committed + * exceptions metadata to disk so that we can amortise away this + * exensive operation. + * + * For the initial version of this code we will remain with + * synchronous io. There are some deadlock issues with async + * that I haven't yet worked out. + */ +static int do_io(int rw, struct kcopyd_region *where, struct kiobuf *iobuf) +{ + int i, sectors_per_block, nr_blocks, start; + int blocksize = get_hardsect_size(where->dev); + int status; + + sectors_per_block = blocksize / SECTOR_SIZE; + + nr_blocks = where->count / sectors_per_block; + start = where->sector / sectors_per_block; + + for (i = 0; i < nr_blocks; i++) + iobuf->blocks[i] = start++; + + iobuf->length = where->count << 9; + iobuf->locked = 1; + + status = brw_kiovec(rw, 1, &iobuf, where->dev, iobuf->blocks, + blocksize); + if (status != (where->count << 9)) + return -EIO; + + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 4, 19) +/* + * FIXME: Remove once 2.4.19 has been released. + */ +struct page *vmalloc_to_page(void *vmalloc_addr) +{ + unsigned long addr = (unsigned long) vmalloc_addr; + struct page *page = NULL; + pmd_t *pmd; + pte_t *pte; + pgd_t *pgd; + + pgd = pgd_offset_k(addr); + if (!pgd_none(*pgd)) { + pmd = pmd_offset(pgd, addr); + if (!pmd_none(*pmd)) { + pte = pte_offset(pmd, addr); + if (pte_present(*pte)) { + page = pte_page(*pte); + } + } + } + return page; +} +#endif + +static int allocate_iobuf(struct pstore *ps) +{ + size_t i, r = -ENOMEM, len, nr_pages; + struct page *page; + + len = ps->chunk_size << SECTOR_SHIFT; + + /* + * Allocate the chunk_size block of memory that will hold + * a single metadata area. + */ + ps->area = vmalloc(len); + if (!ps->area) + return r; + + if (alloc_kiovec(1, &ps->iobuf)) + goto bad; + + nr_pages = ps->chunk_size / (PAGE_SIZE / SECTOR_SIZE); + r = expand_kiobuf(ps->iobuf, nr_pages); + if (r) + goto bad; + + /* + * We lock the pages for ps->area into memory since they'll be + * doing a lot of io. 
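
do_io above drives the synchronous metadata reads and writes through brw_kiovec, which works in device blocks rather than 512-byte sectors, so the chunk-aligned region is first converted: sectors_per_block comes from the hardware sector size, the region's start and length are divided by it, the iobuf's block list is filled with consecutive block numbers, and the byte length is count << 9. The same arithmetic in isolation, with hypothetical figures.

#include <stdio.h>

int main(void)
{
    /* A chunk-sized region: starts at sector 32, 16 sectors long,
     * on a device with 1024-byte hardware sectors (hypothetical). */
    unsigned long sector = 32, count = 16;
    int blocksize = 1024;

    int sectors_per_block = blocksize / 512;          /* 2 sectors per block */
    int nr_blocks = count / sectors_per_block;        /* 8 blocks */
    int start = sector / sectors_per_block;           /* first block is 16 */
    unsigned long length = count << 9;                /* 8192 bytes of io */

    int blocks[64];
    for (int i = 0; i < nr_blocks; i++)
        blocks[i] = start + i;                        /* consecutive block numbers */

    printf("io of %lu bytes as %d blocks starting at block %d:",
           length, nr_blocks, blocks[0]);
    for (int i = 0; i < nr_blocks; i++)
        printf(" %d", blocks[i]);
    printf("\n");
    return 0;
}
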
+ */ + for (i = 0; i < nr_pages; i++) { + page = vmalloc_to_page(ps->area + (i * PAGE_SIZE)); + LockPage(page); + ps->iobuf->maplist[i] = page; + ps->iobuf->nr_pages++; + } + + ps->iobuf->nr_pages = nr_pages; + ps->iobuf->offset = 0; + + return 0; + + bad: + if (ps->iobuf) + free_kiovec(1, &ps->iobuf); + + if (ps->area) + vfree(ps->area); + ps->iobuf = NULL; + return r; +} + +static void free_iobuf(struct pstore *ps) +{ + int i; + + for (i = 0; i < ps->iobuf->nr_pages; i++) + UnlockPage(ps->iobuf->maplist[i]); + ps->iobuf->locked = 0; + + free_kiovec(1, &ps->iobuf); + vfree(ps->area); +} + +/* + * Read or write a chunk aligned and sized block of data from a device. + */ +static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) +{ + int r; + struct kcopyd_region where; + + where.dev = ps->snap->cow->dev; + where.sector = ps->chunk_size * chunk; + where.count = ps->chunk_size; + + r = do_io(rw, &where, ps->iobuf); + if (r) + return r; + + return 0; +} + +/* + * Read or write a metadata area. Remembering to skip the first + * chunk which holds the header. + */ +static int area_io(struct pstore *ps, uint32_t area, int rw) +{ + int r; + uint32_t chunk; + + /* convert a metadata area index to a chunk index */ + chunk = 1 + ((ps->exceptions_per_area + 1) * area); + + r = chunk_io(ps, chunk, rw); + if (r) + return r; + + ps->current_area = area; + return 0; +} + +static int zero_area(struct pstore *ps, uint32_t area) +{ + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + return area_io(ps, area, WRITE); +} + +static int read_header(struct pstore *ps, int *new_snapshot) +{ + int r; + struct disk_header *dh; + + r = chunk_io(ps, 0, READ); + if (r) + return r; + + dh = (struct disk_header *) ps->area; + + if (dh->magic == 0) { + *new_snapshot = 1; + + } else if (dh->magic == SNAP_MAGIC) { + *new_snapshot = 0; + ps->valid = dh->valid; + ps->version = dh->version; + ps->chunk_size = dh->chunk_size; + + } else { + DMWARN("Invalid/corrupt snapshot"); + r = -ENXIO; + } + + return r; +} + +static int write_header(struct pstore *ps) +{ + struct disk_header *dh; + + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + + dh = (struct disk_header *) ps->area; + dh->magic = SNAP_MAGIC; + dh->valid = ps->valid; + dh->version = ps->version; + dh->chunk_size = ps->chunk_size; + + return chunk_io(ps, 0, WRITE); +} + +/* + * Access functions for the disk exceptions, these do the endian conversions. + */ +static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) +{ + if (index >= ps->exceptions_per_area) + return NULL; + + return ((struct disk_exception *) ps->area) + index; +} + +static int read_exception(struct pstore *ps, + uint32_t index, struct disk_exception *result) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + result->old_chunk = le64_to_cpu(e->old_chunk); + result->new_chunk = le64_to_cpu(e->new_chunk); + + return 0; +} + +static int write_exception(struct pstore *ps, + uint32_t index, struct disk_exception *de) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + e->old_chunk = cpu_to_le64(de->old_chunk); + e->new_chunk = cpu_to_le64(de->new_chunk); + + return 0; +} + +/* + * Registers the exceptions that are present in the current area. + * 'full' is filled in to indicate if the area has been + * filled. 
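+ * (Editor's note, not part of the original patch.)  The scan stops at
+ * the first on-disk exception whose new_chunk is 0: chunk 0 always
+ * holds the header, so it can never be a copy destination and serves
+ * as the end-of-list marker described at the top of this file.  While
+ * scanning, ps->next_free is also bumped past the highest new_chunk
+ * seen, so later allocations continue after the existing data.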
+ */ +static int insert_exceptions(struct pstore *ps, int *full) +{ + int i, r; + struct disk_exception de; + + /* presume the area is full */ + *full = 1; + + for (i = 0; i < ps->exceptions_per_area; i++) { + r = read_exception(ps, i, &de); + + if (r) + return r; + + /* + * If the new_chunk is pointing at the start of + * the COW device, where the first metadata area + * is we know that we've hit the end of the + * exceptions. Therefore the area is not full. + */ + if (de.new_chunk == 0LL) { + ps->current_committed = i; + *full = 0; + break; + } + + /* + * Keep track of the start of the free chunks. + */ + if (ps->next_free <= de.new_chunk) + ps->next_free = de.new_chunk + 1; + + /* + * Otherwise we add the exception to the snapshot. + */ + r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk); + if (r) + return r; + } + + return 0; +} + +static int read_exceptions(struct pstore *ps) +{ + uint32_t area; + int r, full = 1; + + /* + * Keeping reading chunks and inserting exceptions until + * we find a partially full area. + */ + for (area = 0; full; area++) { + r = area_io(ps, area, READ); + if (r) + return r; + + r = insert_exceptions(ps, &full); + if (r) + return r; + + area++; + } + + return 0; +} + +static inline struct pstore *get_info(struct exception_store *store) +{ + return (struct pstore *) store->context; +} + +static int persistent_percentfull(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + return (ps->next_free * store->snap->chunk_size * 100) / + get_dev_size(store->snap->cow->dev); +} + +static void persistent_destroy(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + vfree(ps->callbacks); + free_iobuf(ps); + kfree(ps); +} + +static int persistent_prepare(struct exception_store *store, + struct exception *e) +{ + struct pstore *ps = get_info(store); + uint32_t stride; + offset_t size = get_dev_size(store->snap->cow->dev); + + /* Is there enough room ? */ + if (size <= (ps->next_free * store->snap->chunk_size)) + return -ENOSPC; + + e->new_chunk = ps->next_free; + + /* + * Move onto the next free pending, making sure to take + * into account the location of the metadata chunks. + */ + stride = (ps->exceptions_per_area + 1); + if (!(++ps->next_free % stride)) + ps->next_free++; + + atomic_inc(&ps->pending_count); + return 0; +} + +static void persistent_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + int r, i; + struct pstore *ps = get_info(store); + struct disk_exception de; + struct commit_callback *cb; + + de.old_chunk = e->old_chunk; + de.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &de); + + /* + * Add the callback to the back of the array. This code + * is the only place where the callback array is + * manipulated, and we know that it will never be called + * multiple times concurrently. + */ + cb = ps->callbacks + ps->callback_count++; + cb->callback = callback; + cb->context = callback_context; + + /* + * If there are no more exceptions in flight, or we have + * filled this metadata area we commit the exceptions to + * disk. + */ + if (atomic_dec_and_test(&ps->pending_count) || + (ps->current_committed == ps->exceptions_per_area)) { + r = area_io(ps, ps->current_area, WRITE); + if (r) + ps->valid = 0; + + for (i = 0; i < ps->callback_count; i++) { + cb = ps->callbacks + i; + cb->callback(cb->context, r == 0 ? 
1 : 0); + } + + ps->callback_count = 0; + } + + /* + * Have we completely filled the current area ? + */ + if (ps->current_committed == ps->exceptions_per_area) { + ps->current_committed = 0; + r = zero_area(ps, ps->current_area + 1); + if (r) + ps->valid = 0; + } +} + +static void persistent_drop(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + ps->valid = 0; + if (write_header(ps)) + DMWARN("write header failed"); +} + +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size) +{ + int r, new_snapshot; + struct pstore *ps; + + /* allocate the pstore */ + ps = kmalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + + ps->snap = store->snap; + ps->valid = 1; + ps->version = SNAPSHOT_DISK_VERSION; + ps->chunk_size = chunk_size; + ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) / + sizeof(struct disk_exception); + ps->next_free = 2; /* skipping the header and first area */ + ps->current_committed = 0; + + r = allocate_iobuf(ps); + if (r) + goto bad; + + /* + * Allocate space for all the callbacks. + */ + ps->callback_count = 0; + atomic_set(&ps->pending_count, 0); + ps->callbacks = vcalloc(ps->exceptions_per_area, + sizeof(*ps->callbacks)); + + if (!ps->callbacks) + goto bad; + + /* + * Read the snapshot header. + */ + r = read_header(ps, &new_snapshot); + if (r) + goto bad; + + /* + * Do we need to setup a new snapshot ? + */ + if (new_snapshot) { + r = write_header(ps); + if (r) { + DMWARN("write_header failed"); + goto bad; + } + + r = zero_area(ps, 0); + if (r) { + DMWARN("zero_area(0) failed"); + goto bad; + } + + } else { + /* + * Sanity checks. + */ + if (ps->chunk_size != chunk_size) { + DMWARN("chunk size for existing snapshot different " + "from that requested"); + r = -EINVAL; + goto bad; + } + + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + r = -EINVAL; + goto bad; + } + + /* + * Read the metadata. + */ + r = read_exceptions(ps); + if (r) + goto bad; + } + + store->destroy = persistent_destroy; + store->prepare_exception = persistent_prepare; + store->commit_exception = persistent_commit; + store->drop_snapshot = persistent_drop; + store->percent_full = persistent_percentfull; + store->context = ps; + + return r; + + bad: + if (ps) { + if (ps->callbacks) + vfree(ps->callbacks); + + if (ps->iobuf) + free_iobuf(ps); + + kfree(ps); + } + return r; +} + +/*----------------------------------------------------------------- + * Implementation of the store for non-persistent snapshots. 
+ *---------------------------------------------------------------*/ +struct transient_c { + offset_t next_free; +}; + +void transient_destroy(struct exception_store *store) +{ + kfree(store->context); +} + +int transient_prepare(struct exception_store *store, struct exception *e) +{ + struct transient_c *tc = (struct transient_c *) store->context; + offset_t size = get_dev_size(store->snap->cow->dev); + + if (size < (tc->next_free + store->snap->chunk_size)) + return -1; + + e->new_chunk = sector_to_chunk(store->snap, tc->next_free); + tc->next_free += store->snap->chunk_size; + + return 0; +} + +void transient_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + /* Just succeed */ + callback(callback_context, 1); +} + +static int transient_percentfull(struct exception_store *store) +{ + struct transient_c *tc = (struct transient_c *) store->context; + return (tc->next_free * 100) / get_dev_size(store->snap->cow->dev); +} + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize, void **error) +{ + struct transient_c *tc; + + memset(store, 0, sizeof(*store)); + store->destroy = transient_destroy; + store->prepare_exception = transient_prepare; + store->commit_exception = transient_commit; + store->percent_full = transient_percentfull; + store->snap = s; + + tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->next_free = 0; + store->context = tc; + + return 0; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm.h linux.21pre4-ac2/drivers/md/dm.h --- linux.21pre4/drivers/md/dm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm.h 2003-02-04 15:54:23.000000000 +0000 @@ -0,0 +1,241 @@ +/* + * Internal header file for device mapper + * + * Copyright (C) 2001 Sistina Software + * + * This file is released under the LGPL. + */ + +#ifndef DM_INTERNAL_H +#define DM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DM_NAME "device-mapper" /* Name for messaging */ +#define DM_DRIVER_EMAIL "lvm-devel@lists.sistina.com" +#define MAX_DEPTH 16 +#define NODE_SIZE L1_CACHE_BYTES +#define KEYS_PER_NODE (NODE_SIZE / sizeof(offset_t)) +#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) +#define MAX_ARGS 32 +#define MAX_DEVICES 256 + +/* + * List of devices that a metadevice uses and should open/close. + */ +struct dm_dev { + atomic_t count; + struct list_head list; + + int mode; + + kdev_t dev; + struct block_device *bd; +}; + +/* + * I/O that had to be deferred while we were suspended + */ +struct deferred_io { + int rw; + struct buffer_head *bh; + struct deferred_io *next; +}; + +/* + * Btree leaf - this does the actual mapping + */ +struct target { + struct target_type *type; + void *private; +}; + +/* + * The btree + */ +struct dm_table { + /* btree table */ + int depth; + int counts[MAX_DEPTH]; /* in nodes */ + offset_t *index[MAX_DEPTH]; + + int num_targets; + int num_allocated; + offset_t *highs; + struct target *targets; + + /* + * Indicates the rw permissions for the new logical + * device. This should be a combination of FMODE_READ + * and FMODE_WRITE. + */ + int mode; + + /* a list of devices used by this table */ + struct list_head devices; + + /* + * A waitqueue for processes waiting for something + * interesting to happen to this table. 
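+ * (Editor's note, not part of the original patch.)  dm_table_event()
+ * wakes everything sleeping here; the DM_TARGET_WAIT_CMD ioctl
+ * (wait_device_event() in dm-ioctl.c) is the consumer, and targets
+ * such as dm-snapshot fire it when something noteworthy happens,
+ * e.g. each time the snapshot store fills by another WAKE_UP_PERCENT.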
+ */ + wait_queue_head_t eventq; +}; + +/* + * The actual device struct + */ +struct mapped_device { + kdev_t dev; + char name[DM_NAME_LEN]; + char *uuid; + + int use_count; + int suspended; + int read_only; + + /* a list of io's that arrived while we were suspended */ + atomic_t pending; + wait_queue_head_t wait; + struct deferred_io *deferred; + + struct dm_table *map; + + /* used by dm-fs.c */ + devfs_handle_t devfs_entry; +}; + +extern struct block_device_operations dm_blk_dops; + +/* dm-target.c */ +int dm_target_init(void); +struct target_type *dm_get_target_type(const char *name); +void dm_put_target_type(struct target_type *t); +void dm_target_exit(void); + +/* + * Destructively splits argument list to pass to ctr. + */ +int split_args(int max, int *argc, char **argv, char *input); + +/* dm.c */ +struct mapped_device *dm_get_r(int minor); +struct mapped_device *dm_get_w(int minor); + +/* + * There are two ways to lookup a device. + */ +enum { + DM_LOOKUP_BY_NAME, + DM_LOOKUP_BY_UUID +}; + +struct mapped_device *dm_get_name_r(const char *name, int nametype); +struct mapped_device *dm_get_name_w(const char *name, int nametype); + +void dm_put_r(struct mapped_device *md); +void dm_put_w(struct mapped_device *md); + +/* + * Call with no lock. + */ +int dm_create(const char *name, const char *uuid, int minor, int ro, + struct dm_table *table); +int dm_set_name(const char *name, int nametype, const char *newname); +void dm_destroy_all(void); + +/* + * You must have the write lock before calling the remaining md + * methods. + */ +int dm_destroy(struct mapped_device *md); +void dm_set_ro(struct mapped_device *md, int ro); + +/* + * The device must be suspended before calling this method. + */ +int dm_swap_table(struct mapped_device *md, struct dm_table *t); + +/* + * A device can still be used while suspended, but I/O is deferred. + */ +int dm_suspend(struct mapped_device *md); +int dm_resume(struct mapped_device *md); + +/* dm-table.c */ +int dm_table_create(struct dm_table **result, int mode); +void dm_table_destroy(struct dm_table *t); + +int dm_table_add_target(struct dm_table *t, offset_t highs, + struct target_type *type, void *private); +int dm_table_complete(struct dm_table *t); + +/* + * Event handling + */ +void dm_table_event(struct dm_table *t); + +#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x) +#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x) +#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x) + +/* + * Calculate the index of the child node of the n'th node k'th key. + */ +static inline int get_child(int n, int k) +{ + return (n * CHILDREN_PER_NODE) + k; +} + +/* + * Return the n'th node of level l from table t. + */ +static inline offset_t *get_node(struct dm_table *t, int l, int n) +{ + return t->index[l] + (n * KEYS_PER_NODE); +} + +static inline int array_too_big(unsigned long fixed, unsigned long obj, + unsigned long num) +{ + return (num > (ULONG_MAX - fixed) / obj); +} + + +/* + * Targets + */ +int dm_linear_init(void); +void dm_linear_exit(void); + +int dm_stripe_init(void); +void dm_stripe_exit(void); + +int dm_snapshot_init(void); +void dm_snapshot_exit(void); + + +/* + * Init functions for the user interface to device-mapper. At + * the moment an ioctl interface on a special char device is + * used. A filesystem based interface would be a nicer way to + * go. 
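+ * (Editor's note, not part of the original patch.)  Concretely the
+ * special device is a misc character device (_dm_misc in dm-ioctl.c)
+ * with a devfs symlink at DM_DIR "/control"; ctl_ioctl() reduces the
+ * command with _IOC_NR() and dispatches through the small table in
+ * lookup_ioctl().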
+ */ +int __init dm_interface_init(void); +void dm_interface_exit(void); + + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-ioctl.c linux.21pre4-ac2/drivers/md/dm-ioctl.c --- linux.21pre4/drivers/md/dm-ioctl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-ioctl.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,830 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include +#include + +/*----------------------------------------------------------------- + * Implementation of the ioctl commands + *---------------------------------------------------------------*/ + +/* + * All the ioctl commands get dispatched to functions with this + * prototype. + */ +typedef int (*ioctl_fn)(struct dm_ioctl *param, struct dm_ioctl *user); + +/* + * This is really a debug only call. + */ +static int remove_all(struct dm_ioctl *param, struct dm_ioctl *user) +{ + dm_destroy_all(); + return 0; +} + +/* + * Check a string doesn't overrun the chunk of + * memory we copied from userland. + */ +static int valid_str(char *str, void *begin, void *end) +{ + while (((void *) str >= begin) && ((void *) str < end)) + if (!*str++) + return 0; + + return -EINVAL; +} + +static int next_target(struct dm_target_spec *last, uint32_t next, + void *begin, void *end, + struct dm_target_spec **spec, char **params) +{ + *spec = (struct dm_target_spec *) + ((unsigned char *) last + next); + *params = (char *) (*spec + 1); + + if (*spec < (last + 1) || ((void *) *spec > end)) + return -EINVAL; + + return valid_str(*params, begin, end); +} + +/* + * Checks to see if there's a gap in the table. + * Returns true iff there is a gap. + */ +static int gap(struct dm_table *table, struct dm_target_spec *spec) +{ + if (!table->num_targets) + return (spec->sector_start > 0) ? 
1 : 0; + + if (spec->sector_start != table->highs[table->num_targets - 1] + 1) + return 1; + + return 0; +} + +static int populate_table(struct dm_table *table, struct dm_ioctl *args) +{ + int i = 0, r, first = 1, argc; + struct dm_target_spec *spec; + char *params, *argv[MAX_ARGS]; + struct target_type *ttype; + void *context, *begin, *end; + offset_t highs = 0; + + if (!args->target_count) { + DMWARN("populate_table: no targets specified"); + return -EINVAL; + } + + begin = (void *) args; + end = begin + args->data_size; + +#define PARSE_ERROR(msg) {DMWARN(msg); return -EINVAL;} + + for (i = 0; i < args->target_count; i++) { + + if (first) + r = next_target((struct dm_target_spec *) args, + args->data_start, + begin, end, &spec, ¶ms); + else + r = next_target(spec, spec->next, begin, end, + &spec, ¶ms); + + if (r) + PARSE_ERROR("unable to find target"); + + /* Look up the target type */ + ttype = dm_get_target_type(spec->target_type); + if (!ttype) + PARSE_ERROR("unable to find target type"); + + if (gap(table, spec)) + PARSE_ERROR("gap in target ranges"); + + /* Split up the parameter list */ + if (split_args(MAX_ARGS, &argc, argv, params) < 0) + PARSE_ERROR("Too many arguments"); + + /* Build the target */ + if (ttype->ctr(table, spec->sector_start, spec->length, + argc, argv, &context)) { + DMWARN("%s: target constructor failed", + (char *) context); + return -EINVAL; + } + + /* Add the target to the table */ + highs = spec->sector_start + (spec->length - 1); + if (dm_table_add_target(table, highs, ttype, context)) + PARSE_ERROR("internal error adding target to table"); + + first = 0; + } + +#undef PARSE_ERROR + + r = dm_table_complete(table); + return r; +} + +/* + * Round up the ptr to the next 'align' boundary. Obviously + * 'align' must be a power of 2. + */ +static inline void *align_ptr(void *ptr, unsigned int align) +{ + align--; + return (void *) (((unsigned long) (ptr + align)) & ~align); +} + +/* + * Copies a dm_ioctl and an optional additional payload to + * userland. + */ +static int results_to_user(struct dm_ioctl *user, struct dm_ioctl *param, + void *data, uint32_t len) +{ + int r; + void *ptr = NULL; + + if (data) { + ptr = align_ptr(user + 1, sizeof(unsigned long)); + param->data_start = ptr - (void *) user; + } + + /* + * The version number has already been filled in, so we + * just copy later fields. + */ + r = copy_to_user(&user->data_size, ¶m->data_size, + sizeof(*param) - sizeof(param->version)); + if (r) + return -EFAULT; + + if (data) { + if (param->data_start + len > param->data_size) + return -ENOSPC; + + if (copy_to_user(ptr, data, len)) + r = -EFAULT; + } + + return r; +} + +/* + * Fills in a dm_ioctl structure, ready for sending back to + * userland. + */ +static void __info(struct mapped_device *md, struct dm_ioctl *param) +{ + param->flags = DM_EXISTS_FLAG; + if (md->suspended) + param->flags |= DM_SUSPEND_FLAG; + if (md->read_only) + param->flags |= DM_READONLY_FLAG; + + strncpy(param->name, md->name, sizeof(param->name)); + + if (md->uuid) + strncpy(param->uuid, md->uuid, sizeof(param->uuid) - 1); + else + param->uuid[0] = '\0'; + + param->open_count = md->use_count; + param->dev = kdev_t_to_nr(md->dev); + param->target_count = md->map->num_targets; +} + +/* + * Always use UUID for lookups if it's present, otherwise use name. + */ +static inline char *lookup_name(struct dm_ioctl *param) +{ + return (*param->uuid) ? param->uuid : param->name; +} + +static inline int lookup_type(struct dm_ioctl *param) +{ + return (*param->uuid) ? 
DM_LOOKUP_BY_UUID : DM_LOOKUP_BY_NAME; +} + +#define ALIGNMENT sizeof(int) +static void *_align(void *ptr, unsigned int a) +{ + register unsigned long align = --a; + + return (void *) (((unsigned long) ptr + align) & ~align); +} + +/* + * Copies device info back to user space, used by + * the create and info ioctls. + */ +static int info(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + + param->flags = 0; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + + __info(md, param); + dm_put_r(md); + + out: + return results_to_user(user, param, NULL, 0); +} + +static inline int get_mode(struct dm_ioctl *param) +{ + int mode = FMODE_READ | FMODE_WRITE; + + if (param->flags & DM_READONLY_FLAG) + mode = FMODE_READ; + + return mode; +} + +static int create(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r, ro; + struct dm_table *t; + int minor; + + r = dm_table_create(&t, get_mode(param)); + if (r) + return r; + + r = populate_table(t, param); + if (r) { + dm_table_destroy(t); + return r; + } + + minor = (param->flags & DM_PERSISTENT_DEV_FLAG) ? + MINOR(to_kdev_t(param->dev)) : -1; + + ro = (param->flags & DM_READONLY_FLAG) ? 1 : 0; + + r = dm_create(param->name, param->uuid, minor, ro, t); + if (r) { + dm_table_destroy(t); + return r; + } + + r = info(param, user); + return r; +} + + + +/* + * Build up the status struct for each target + */ +static int __status(struct mapped_device *md, struct dm_ioctl *param, + char *outbuf, int *len) +{ + int i; + struct dm_target_spec *spec; + uint64_t sector = 0LL; + char *outptr; + status_type_t type; + + if (param->flags & DM_STATUS_TABLE_FLAG) + type = STATUSTYPE_TABLE; + else + type = STATUSTYPE_INFO; + + outptr = outbuf; + + /* Get all the target info */ + for (i = 0; i < md->map->num_targets; i++) { + struct target_type *tt = md->map->targets[i].type; + offset_t high = md->map->highs[i]; + + if (outptr - outbuf + + sizeof(struct dm_target_spec) > param->data_size) + return -ENOMEM; + + spec = (struct dm_target_spec *) outptr; + + spec->status = 0; + spec->sector_start = sector; + spec->length = high - sector + 1; + strncpy(spec->target_type, tt->name, sizeof(spec->target_type)); + + outptr += sizeof(struct dm_target_spec); + + /* Get the status/table string from the target driver */ + if (tt->status) + tt->status(type, outptr, + outbuf + param->data_size - outptr, + md->map->targets[i].private); + else + outptr[0] = '\0'; + + outptr += strlen(outptr) + 1; + _align(outptr, ALIGNMENT); + + sector = high + 1; + + spec->next = outptr - outbuf; + } + + param->target_count = md->map->num_targets; + *len = outptr - outbuf; + + return 0; +} + +/* + * Return the status of a device as a text string for each + * target. + */ +static int get_status(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + int len = 0; + int ret; + char *outbuf = NULL; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + + /* We haven't a clue how long the resultant data will be so + just allocate as much as userland has allowed us and make sure + we don't overun it */ + outbuf = kmalloc(param->data_size, GFP_KERNEL); + if (!outbuf) + goto out; + /* + * Get the status of all targets + */ + __status(md, param, outbuf, &len); + + /* + * Setup the basic dm_ioctl structure. 
+ */ + __info(md, param); + + out: + if (md) + dm_put_r(md); + + ret = results_to_user(user, param, outbuf, len); + + if (outbuf) + kfree(outbuf); + + return ret; +} + +/* + * Wait for a device to report an event + */ +static int wait_device_event(struct dm_ioctl *param, struct dm_ioctl *user) +{ + struct mapped_device *md; + DECLARE_WAITQUEUE(wq, current); + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + /* + * Device not found - returns cleared exists flag. + */ + goto out; + /* + * Setup the basic dm_ioctl structure. + */ + __info(md, param); + + /* + * Wait for a notification event + */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&md->map->eventq, &wq); + + dm_put_r(md); + + schedule(); + set_current_state(TASK_RUNNING); + + out: + return results_to_user(user, param, NULL, 0); +} + +/* + * Retrieves a list of devices used by a particular dm device. + */ +static int dep(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int count, r; + struct mapped_device *md; + struct list_head *tmp; + size_t len = 0; + struct dm_target_deps *deps = NULL; + + md = dm_get_name_r(lookup_name(param), lookup_type(param)); + if (!md) + goto out; + + /* + * Setup the basic dm_ioctl structure. + */ + __info(md, param); + + /* + * Count the devices. + */ + count = 0; + list_for_each(tmp, &md->map->devices) + count++; + + /* + * Allocate a kernel space version of the dm_target_status + * struct. + */ + if (array_too_big(sizeof(*deps), sizeof(*deps->dev), count)) { + dm_put_r(md); + return -ENOMEM; + } + + len = sizeof(*deps) + (sizeof(*deps->dev) * count); + deps = kmalloc(len, GFP_KERNEL); + if (!deps) { + dm_put_r(md); + return -ENOMEM; + } + + /* + * Fill in the devices. + */ + deps->count = count; + count = 0; + list_for_each(tmp, &md->map->devices) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + deps->dev[count++] = kdev_t_to_nr(dd->dev); + } + dm_put_r(md); + + out: + r = results_to_user(user, param, deps, len); + + kfree(deps); + return r; +} + +static int remove(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) + return -ENXIO; + + r = dm_destroy(md); + dm_put_w(md); + if (!r) + kfree(md); + + return r; +} + +static int suspend(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) + return -ENXIO; + + r = (param->flags & DM_SUSPEND_FLAG) ? dm_suspend(md) : dm_resume(md); + dm_put_w(md); + + return r; +} + +static int reload(struct dm_ioctl *param, struct dm_ioctl *user) +{ + int r; + struct mapped_device *md; + struct dm_table *t; + + r = dm_table_create(&t, get_mode(param)); + if (r) + return r; + + r = populate_table(t, param); + if (r) { + dm_table_destroy(t); + return r; + } + + md = dm_get_name_w(lookup_name(param), lookup_type(param)); + if (!md) { + dm_table_destroy(t); + return -ENXIO; + } + + r = dm_swap_table(md, t); + if (r) { + dm_put_w(md); + dm_table_destroy(t); + return r; + } + + dm_set_ro(md, (param->flags & DM_READONLY_FLAG) ? 
1 : 0); + dm_put_w(md); + + r = info(param, user); + return r; +} + +static int rename(struct dm_ioctl *param, struct dm_ioctl *user) +{ + char *newname = (char *) param + param->data_start; + + if (valid_str(newname, (void *) param, + (void *) param + param->data_size) || + dm_set_name(lookup_name(param), lookup_type(param), newname)) { + DMWARN("Invalid new logical volume name supplied."); + return -EINVAL; + } + + return 0; +} + + +/*----------------------------------------------------------------- + * Implementation of open/close/ioctl on the special char + * device. + *---------------------------------------------------------------*/ +static int ctl_open(struct inode *inode, struct file *file) +{ + /* only root can open this */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + MOD_INC_USE_COUNT; + + return 0; +} + +static int ctl_close(struct inode *inode, struct file *file) +{ + MOD_DEC_USE_COUNT; + return 0; +} + +static ioctl_fn lookup_ioctl(unsigned int cmd) +{ + static struct { + int cmd; + ioctl_fn fn; + } _ioctls[] = { + {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */ + {DM_REMOVE_ALL_CMD, remove_all}, + {DM_DEV_CREATE_CMD, create}, + {DM_DEV_REMOVE_CMD, remove}, + {DM_DEV_RELOAD_CMD, reload}, + {DM_DEV_RENAME_CMD, rename}, + {DM_DEV_SUSPEND_CMD, suspend}, + {DM_DEV_DEPS_CMD, dep}, + {DM_DEV_STATUS_CMD, info}, + {DM_TARGET_STATUS_CMD, get_status}, + {DM_TARGET_WAIT_CMD, wait_device_event}, + }; + static int nelts = sizeof(_ioctls) / sizeof(*_ioctls); + + return (cmd >= nelts) ? NULL : _ioctls[cmd].fn; +} + +/* + * As well as checking the version compatibility this always + * copies the kernel interface version out. + */ +static int check_version(int cmd, struct dm_ioctl *user) +{ + uint32_t version[3]; + int r = 0; + + if (copy_from_user(version, user->version, sizeof(version))) + return -EFAULT; + + if ((DM_VERSION_MAJOR != version[0]) || + (DM_VERSION_MINOR < version[1])) { + DMWARN("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); + r = -EINVAL; + } + + /* + * Fill in the kernel version. 
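+ * (Editor's note, not part of the original patch.)  The rule applied
+ * above is: the major numbers must match exactly and the kernel's
+ * minor must be at least the minor userland asked for.  Whether or
+ * not that test passes, the kernel's own (major, minor, patchlevel)
+ * triple is copied back so the caller can report both sides of a
+ * mismatch.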
+ */ + version[0] = DM_VERSION_MAJOR; + version[1] = DM_VERSION_MINOR; + version[2] = DM_VERSION_PATCHLEVEL; + if (copy_to_user(user->version, version, sizeof(version))) + return -EFAULT; + + return r; +} + +static void free_params(struct dm_ioctl *param) +{ + vfree(param); +} + +static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param) +{ + struct dm_ioctl tmp, *dmi; + + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + + if (tmp.data_size < sizeof(tmp) || tmp.data_size > 65536) + return -EINVAL; + + dmi = (struct dm_ioctl *) vmalloc(tmp.data_size); + if (!dmi) + return -ENOMEM; + + if (copy_from_user(dmi, user, tmp.data_size)) { + vfree(dmi); + return -EFAULT; + } + + *param = dmi; + return 0; +} + +static int validate_params(uint cmd, struct dm_ioctl *param) +{ + /* Ignores parameters */ + if (cmd == DM_REMOVE_ALL_CMD) + return 0; + + /* Unless creating, either name of uuid but not both */ + if (cmd != DM_DEV_CREATE_CMD) { + if ((!*param->uuid && !*param->name) || + (*param->uuid && *param->name)) { + DMWARN("one of name or uuid must be supplied"); + return -EINVAL; + } + } + + /* Ensure strings are terminated */ + param->name[DM_NAME_LEN - 1] = '\0'; + param->uuid[DM_UUID_LEN - 1] = '\0'; + + return 0; +} + +static int ctl_ioctl(struct inode *inode, struct file *file, + uint command, ulong u) +{ + + int r = 0, cmd; + struct dm_ioctl *param; + struct dm_ioctl *user = (struct dm_ioctl *) u; + ioctl_fn fn = NULL; + + if (_IOC_TYPE(command) != DM_IOCTL) + return -ENOTTY; + + cmd = _IOC_NR(command); + + /* + * Check the interface version passed in. This also + * writes out the kernel's interface version. + */ + r = check_version(cmd, user); + if (r) + return r; + + /* + * Nothing more to do for the version command. + */ + if (cmd == DM_VERSION_CMD) + return 0; + + fn = lookup_ioctl(cmd); + if (!fn) { + DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); + return -ENOTTY; + } + + /* + * Copy the parameters into kernel space. 
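+ * (Editor's note, not part of the original patch.)  copy_params()
+ * does this in two steps: a fixed-size copy of struct dm_ioctl to
+ * learn data_size, a sanity check that data_size lies between
+ * sizeof(struct dm_ioctl) and 64 KiB, then a vmalloc() of that many
+ * bytes and a second copy of the whole payload.  validate_params()
+ * afterwards insists on exactly one of name/uuid (except for the
+ * create and remove_all commands) and NUL-terminates both strings.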
+ */ + r = copy_params(user, ¶m); + if (r) + return r; + + r = validate_params(cmd, param); + if (r) { + free_params(param); + return r; + } + + r = fn(param, user); + free_params(param); + return r; +} + +static struct file_operations _ctl_fops = { + open: ctl_open, + release: ctl_close, + ioctl: ctl_ioctl, + owner: THIS_MODULE, +}; + +static devfs_handle_t _ctl_handle; + +static struct miscdevice _dm_misc = { + minor: MISC_DYNAMIC_MINOR, + name: DM_NAME, + fops: &_ctl_fops +}; + +static int __init dm_devfs_init(void) { + int r; + char rname[64]; + + r = devfs_generate_path(_dm_misc.devfs_handle, rname + 3, + sizeof rname - 3); + if (r == -ENOSYS) + return 0; /* devfs not present */ + + if (r < 0) { + DMERR("devfs_generate_path failed for control device"); + return r; + } + + strncpy(rname + r, "../", 3); + r = devfs_mk_symlink(NULL, DM_DIR "/control", + DEVFS_FL_DEFAULT, rname + r, &_ctl_handle, NULL); + if (r) { + DMERR("devfs_mk_symlink failed for control device"); + return r; + } + devfs_auto_unregister(_dm_misc.devfs_handle, _ctl_handle); + + return 0; +} + +/* Create misc character device and link to DM_DIR/control */ +int __init dm_interface_init(void) +{ + int r; + + r = misc_register(&_dm_misc); + if (r) { + DMERR("misc_register failed for control device"); + return r; + } + + r = dm_devfs_init(); + if (r) { + misc_deregister(&_dm_misc); + return r; + } + + DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, + DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, + DM_DRIVER_EMAIL); + + return 0; +} + +void dm_interface_exit(void) +{ + if (misc_deregister(&_dm_misc) < 0) + DMERR("misc_deregister failed for control device"); +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-linear.c linux.21pre4-ac2/drivers/md/dm-linear.c --- linux.21pre4/drivers/md/dm-linear.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-linear.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +/* + * Linear: maps a linear range of a device. 
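+ * (Editor's note, not part of the original patch.)  The remap is a
+ * single signed offset worked out at construction time and applied
+ * per buffer head, as in the code below:
+ *
+ *     lc->delta = start - b;          /* in linear_ctr() */
+ *     bh->b_rsector += lc->delta;     /* in linear_map() */
+ *
+ * Purely as an illustration, a target whose logical range begins at
+ * sector 0 and points at sector 1024 of the underlying device sends
+ * logical sector 7 to sector 1031 of lc->dev.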
+ */ +struct linear_c { + long delta; /* FIXME: we need a signed offset type */ + long start; /* For display only */ + struct dm_dev *dev; +}; + +/* + * Construct a linear mapping: + */ +static int linear_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct linear_c *lc; + unsigned long start; /* FIXME: unsigned long long */ + char *end; + + if (argc != 2) { + *context = "dm-linear: Not enough arguments"; + return -EINVAL; + } + + lc = kmalloc(sizeof(*lc), GFP_KERNEL); + if (lc == NULL) { + *context = "dm-linear: Cannot allocate linear context"; + return -ENOMEM; + } + + start = simple_strtoul(argv[1], &end, 10); + if (*end) { + *context = "dm-linear: Invalid device sector"; + goto bad; + } + + if (dm_table_get_device(t, argv[0], start, l, t->mode, &lc->dev)) { + *context = "dm-linear: Device lookup failed"; + goto bad; + } + + lc->delta = (int) start - (int) b; + lc->start = start; + *context = lc; + return 0; + + bad: + kfree(lc); + return -EINVAL; +} + +static void linear_dtr(struct dm_table *t, void *c) +{ + struct linear_c *lc = (struct linear_c *) c; + + dm_table_put_device(t, lc->dev); + kfree(c); +} + +static int linear_map(struct buffer_head *bh, int rw, void *context) +{ + struct linear_c *lc = (struct linear_c *) context; + + bh->b_rdev = lc->dev->dev; + bh->b_rsector = bh->b_rsector + lc->delta; + + return 1; +} + +static int linear_status(status_type_t type, char *result, int maxlen, + void *context) +{ + struct linear_c *lc = (struct linear_c *) context; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s %ld", kdevname(lc->dev->dev), + lc->start); + break; + } + return 0; +} + +static struct target_type linear_target = { + name: "linear", + module: THIS_MODULE, + ctr: linear_ctr, + dtr: linear_dtr, + map: linear_map, + status: linear_status, +}; + +int __init dm_linear_init(void) +{ + int r = dm_register_target(&linear_target); + + if (r < 0) + DMERR("linear: register failed %d", r); + + return r; +} + +void dm_linear_exit(void) +{ + int r = dm_unregister_target(&linear_target); + + if (r < 0) + DMERR("linear: unregister failed %d", r); +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-snapshot.c linux.21pre4-ac2/drivers/md/dm-snapshot.c --- linux.21pre4/drivers/md/dm-snapshot.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-snapshot.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,1169 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dm-snapshot.h" +#include "kcopyd.h" + +/* + * FIXME: Remove this before release. + */ +#if 0 +#define DMDEBUG(x...) DMWARN( ## x) +#else +#define DMDEBUG(x...) +#endif + +/* + * The percentage increment we will wake up users at + */ +#define WAKE_UP_PERCENT 5 + +/* + * Hard sector size used all over the kernel + */ +#define SECTOR_SIZE 512 + +/* + * kcopyd priority of snapshot operations + */ +#define SNAPSHOT_COPY_PRIORITY 2 + +struct pending_exception { + struct exception e; + + /* + * Origin buffers waiting for this to complete are held + * in a list (using b_reqnext). + */ + struct buffer_head *origin_bhs; + struct buffer_head *snapshot_bhs; + + /* + * Other pending_exceptions that are processing this + * chunk. 
When this list is empty, we know we can + * complete the origins. + */ + struct list_head siblings; + + /* Pointer back to snapshot context */ + struct dm_snapshot *snap; + + /* + * 1 indicates the exception has already been sent to + * kcopyd. + */ + int started; +}; + +/* + * Hash table mapping origin volumes to lists of snapshots and + * a lock to protect it + */ +static kmem_cache_t *exception_cache; +static kmem_cache_t *pending_cache; +static mempool_t *pending_pool; + +/* + * One of these per registered origin, held in the snapshot_origins hash + */ +struct origin { + /* The origin device */ + kdev_t dev; + + struct list_head hash_list; + + /* List of snapshots for this origin */ + struct list_head snapshots; +}; + +/* + * Size of the hash table for origin volumes. If we make this + * the size of the minors list then it should be nearly perfect + */ +#define ORIGIN_HASH_SIZE 256 +#define ORIGIN_MASK 0xFF +static struct list_head *_origins; +static struct rw_semaphore _origins_lock; + +static int init_origin_hash(void) +{ + int i; + + _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), + GFP_KERNEL); + if (!_origins) { + DMERR("Device mapper: Snapshot: unable to allocate memory"); + return -ENOMEM; + } + + for (i = 0; i < ORIGIN_HASH_SIZE; i++) + INIT_LIST_HEAD(_origins + i); + init_rwsem(&_origins_lock); + + return 0; +} + +static void exit_origin_hash(void) +{ + kfree(_origins); +} + +static inline unsigned int origin_hash(kdev_t dev) +{ + return MINOR(dev) & ORIGIN_MASK; +} + +static struct origin *__lookup_origin(kdev_t origin) +{ + struct list_head *slist; + struct list_head *ol; + struct origin *o; + + ol = &_origins[origin_hash(origin)]; + list_for_each(slist, ol) { + o = list_entry(slist, struct origin, hash_list); + + if (o->dev == origin) + return o; + } + + return NULL; +} + +static void __insert_origin(struct origin *o) +{ + struct list_head *sl = &_origins[origin_hash(o->dev)]; + list_add_tail(&o->hash_list, sl); +} + +/* + * Make a note of the snapshot and its origin so we can look it + * up when the origin has a write on it. + */ +static int register_snapshot(struct dm_snapshot *snap) +{ + struct origin *o; + kdev_t dev = snap->origin->dev; + + down_write(&_origins_lock); + o = __lookup_origin(dev); + + if (!o) { + /* New origin */ + o = kmalloc(sizeof(*o), GFP_KERNEL); + if (!o) { + up_write(&_origins_lock); + return -ENOMEM; + } + + /* Initialise the struct */ + INIT_LIST_HEAD(&o->snapshots); + o->dev = dev; + + __insert_origin(o); + } + + list_add_tail(&snap->list, &o->snapshots); + + up_write(&_origins_lock); + return 0; +} + +static void unregister_snapshot(struct dm_snapshot *s) +{ + struct origin *o; + + down_write(&_origins_lock); + o = __lookup_origin(s->origin->dev); + + list_del(&s->list); + if (list_empty(&o->snapshots)) { + list_del(&o->hash_list); + kfree(o); + } + + up_write(&_origins_lock); +} + +/* + * Implementation of the exception hash tables. 
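+ * (Editor's note, not part of the original patch.)  Each table is an
+ * array of list_heads whose length is a power of two, so a chunk
+ * number is reduced to a bucket with a simple mask:
+ *
+ *     bucket = chunk & et->hash_mask;
+ *
+ * Entries are struct exception objects chained through their
+ * hash_list member; lookup is a linear walk of one bucket.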
+ */ +static int init_exception_table(struct exception_table *et, uint32_t size) +{ + int i; + + et->hash_mask = size - 1; + et->table = vcalloc(size, sizeof(struct list_head)); + if (!et->table) + return -ENOMEM; + + for (i = 0; i < size; i++) + INIT_LIST_HEAD(et->table + i); + + return 0; +} + +static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem) +{ + struct list_head *slot, *entry, *temp; + struct exception *ex; + int i, size; + + size = et->hash_mask + 1; + for (i = 0; i < size; i++) { + slot = et->table + i; + + list_for_each_safe(entry, temp, slot) { + ex = list_entry(entry, struct exception, hash_list); + kmem_cache_free(mem, ex); + } + } + + vfree(et->table); +} + +/* + * FIXME: check how this hash fn is performing. + */ +static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +{ + return chunk & et->hash_mask; +} + +static void insert_exception(struct exception_table *eh, struct exception *e) +{ + struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; + list_add(&e->hash_list, l); +} + +static inline void remove_exception(struct exception *e) +{ + list_del(&e->hash_list); +} + +/* + * Return the exception data for a sector, or NULL if not + * remapped. + */ +static struct exception *lookup_exception(struct exception_table *et, + chunk_t chunk) +{ + struct list_head *slot, *el; + struct exception *e; + + slot = &et->table[exception_hash(et, chunk)]; + list_for_each(el, slot) { + e = list_entry(el, struct exception, hash_list); + if (e->old_chunk == chunk) + return e; + } + + return NULL; +} + +static inline struct exception *alloc_exception(void) +{ + struct exception *e; + + e = kmem_cache_alloc(exception_cache, GFP_NOIO); + if (!e) + e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); + + return e; +} + +static inline void free_exception(struct exception *e) +{ + kmem_cache_free(exception_cache, e); +} + +static inline struct pending_exception *alloc_pending_exception(void) +{ + return mempool_alloc(pending_pool, GFP_NOIO); +} + +static inline void free_pending_exception(struct pending_exception *pe) +{ + mempool_free(pe, pending_pool); +} + +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) +{ + struct exception *e; + + e = alloc_exception(); + if (!e) + return -ENOMEM; + + e->old_chunk = old; + e->new_chunk = new; + insert_exception(&s->complete, e); + return 0; +} + +/* + * Hard coded magic. + */ +static int calc_max_buckets(void) +{ + unsigned long mem; + + mem = num_physpages << PAGE_SHIFT; + mem /= 50; + mem /= sizeof(struct list_head); + + return mem; +} + +/* + * Rounds a number down to a power of 2. + */ +static inline uint32_t round_down(uint32_t n) +{ + while (n & (n - 1)) + n &= (n - 1); + return n; +} + +/* + * Allocate room for a suitable hash table. + */ +static int init_hash_tables(struct dm_snapshot *s) +{ + offset_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + + /* + * Calculate based on the size of the original volume or + * the COW volume... 
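+ * (Editor's note, not part of the original patch.)  The completed
+ * table gets roughly one bucket per chunk of the smaller of the two
+ * devices, capped by calc_max_buckets() (about a fiftieth of RAM
+ * worth of list_heads) and rounded down to a power of two.  Purely as
+ * an illustration, a 1 GiB origin (2097152 sectors) with 16-sector
+ * chunks asks for 131072 buckets.  The pending table set up below is
+ * kept to an eighth of that size, with a floor of 64 buckets.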
+ */ + cow_dev_size = get_dev_size(s->cow->dev); + origin_dev_size = get_dev_size(s->origin->dev); + max_buckets = calc_max_buckets(); + + hash_size = min(origin_dev_size, cow_dev_size) / s->chunk_size; + hash_size = min(hash_size, max_buckets); + + /* Round it down to a power of 2 */ + hash_size = round_down(hash_size); + if (init_exception_table(&s->complete, hash_size)) + return -ENOMEM; + + /* + * Allocate hash table for in-flight exceptions + * Make this smaller than the real hash table + */ + hash_size >>= 3; + if (!hash_size) + hash_size = 64; + + if (init_exception_table(&s->pending, hash_size)) { + exit_exception_table(&s->complete, exception_cache); + return -ENOMEM; + } + + return 0; +} + +/* + * Round a number up to the nearest 'size' boundary. size must + * be a power of 2. + */ +static inline ulong round_up(ulong n, ulong size) +{ + size--; + return (n + size) & ~size; +} + +/* + * Construct a snapshot mapping:
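+ * (Editor's note: the argument list as parsed by snapshot_ctr()
+ * below.)  Four arguments are expected: the origin device path, the
+ * COW device path, 'P' or 'N' for a persistent or non-persistent
+ * store, and the chunk size in 512-byte sectors (silently rounded up
+ * to a multiple of the page size and required to be a power of two).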

+ */ +static int snapshot_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct dm_snapshot *s; + unsigned long chunk_size; + int r = -EINVAL; + char *persistent; + char *origin_path; + char *cow_path; + char *value; + int blocksize; + + if (argc < 4) { + *context = "dm-snapshot: requires exactly 4 arguments"; + r = -EINVAL; + goto bad; + } + + origin_path = argv[0]; + cow_path = argv[1]; + persistent = argv[2]; + + if ((*persistent & 0x5f) != 'P' && (*persistent & 0x5f) != 'N') { + *context = "Persistent flag is not P or N"; + r = -EINVAL; + goto bad; + } + + chunk_size = simple_strtoul(argv[3], &value, 10); + if (chunk_size == 0 || value == NULL) { + *context = "Invalid chunk size"; + r = -EINVAL; + goto bad; + } + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) { + *context = "Cannot allocate snapshot context private structure"; + r = -ENOMEM; + goto bad; + } + + r = dm_table_get_device(t, origin_path, 0, 0, FMODE_READ, &s->origin); + if (r) { + *context = "Cannot get origin device"; + goto bad_free; + } + + r = dm_table_get_device(t, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + dm_table_put_device(t, s->origin); + *context = "Cannot get COW device"; + goto bad_free; + } + + /* + * Chunk size must be multiple of page size. Silently + * round up if it's not. + */ + chunk_size = round_up(chunk_size, PAGE_SIZE / SECTOR_SIZE); + + /* Validate the chunk size against the device block size */ + blocksize = get_hardsect_size(s->cow->dev); + if (chunk_size % (blocksize / SECTOR_SIZE)) { + *context = "Chunk size is not a multiple of device blocksize"; + r = -EINVAL; + goto bad_putdev; + } + + /* Check the sizes are small enough to fit in one kiovec */ + if (chunk_size > KIO_MAX_SECTORS) { + *context = "Chunk size is too big"; + r = -EINVAL; + goto bad_putdev; + } + + /* Check chunk_size is a power of 2 */ + if (chunk_size & (chunk_size - 1)) { + *context = "Chunk size is not a power of 2"; + r = -EINVAL; + goto bad_putdev; + } + + s->chunk_size = chunk_size; + s->chunk_mask = chunk_size - 1; + s->type = *persistent; + for (s->chunk_shift = 0; chunk_size; + s->chunk_shift++, chunk_size >>= 1) + ; + s->chunk_shift--; + + s->valid = 1; + s->last_percent = 0; + s->table = t; + init_rwsem(&s->lock); + + /* Allocate hash table for COW data */ + if (init_hash_tables(s)) { + *context = "Unable to allocate hash table space"; + r = -ENOMEM; + goto bad_putdev; + } + + /* + * Check the persistent flag - done here because we need the iobuf + * to check the LV header + */ + s->store.snap = s; + + if ((*persistent & 0x5f) == 'P') + r = dm_create_persistent(&s->store, s->chunk_size); + else + r = dm_create_transient(&s->store, s, blocksize, context); + + if (r) { + *context = "Couldn't create exception store"; + r = -EINVAL; + goto bad_free1; + } + + /* Flush IO to the origin device */ +#if LVM_VFS_ENHANCEMENT + fsync_dev_lockfs(s->origin->dev); +#else + fsync_dev(s->origin->dev); +#endif + + /* Add snapshot to the list of snapshots for this origin */ + if (register_snapshot(s)) { + r = -EINVAL; + *context = "Cannot register snapshot origin"; + goto bad_free2; + } +#if LVM_VFS_ENHANCEMENT + unlockfs(s->origin->dev); +#endif + kcopyd_inc_client_count(); + + *context = s; + return 0; + + bad_free2: + s->store.destroy(&s->store); + + bad_free1: + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + bad_putdev: + dm_table_put_device(t, s->cow); + dm_table_put_device(t, s->origin); + + 
bad_free: + kfree(s); + + bad: + return r; +} + +static void snapshot_dtr(struct dm_table *t, void *context) +{ + struct dm_snapshot *s = (struct dm_snapshot *) context; + + dm_table_event(s->table); + + unregister_snapshot(s); + + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + /* Deallocate memory used */ + s->store.destroy(&s->store); + + dm_table_put_device(t, s->origin); + dm_table_put_device(t, s->cow); + kfree(s); + + kcopyd_dec_client_count(); +} + +/* + * We hold lists of buffer_heads, using the b_reqnext field. + */ +static void queue_buffer(struct buffer_head **queue, struct buffer_head *bh) +{ + bh->b_reqnext = *queue; + *queue = bh; +} + +/* + * Flush a list of buffers. + */ +static void flush_buffers(struct buffer_head *bh) +{ + struct buffer_head *n; + + DMDEBUG("begin flush"); + while (bh) { + n = bh->b_reqnext; + bh->b_reqnext = NULL; + DMDEBUG("flushing %p", bh); + generic_make_request(WRITE, bh); + bh = n; + } + + run_task_queue(&tq_disk); +} + +/* + * Error a list of buffers. + */ +static void error_buffers(struct buffer_head *bh) +{ + struct buffer_head *n; + + while (bh) { + n = bh->b_reqnext; + bh->b_reqnext = NULL; + buffer_IO_error(bh); + bh = n; + } +} + +static void pending_complete(struct pending_exception *pe, int success) +{ + struct exception *e; + struct dm_snapshot *s = pe->snap; + + if (success) { + e = alloc_exception(); + if (!e) { + printk("Unable to allocate exception."); + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + up_write(&s->lock); + return; + } + + /* + * Add a proper exception, and remove the + * inflight exception from the list. + */ + down_write(&s->lock); + + memcpy(e, &pe->e, sizeof(*e)); + insert_exception(&s->complete, e); + remove_exception(&pe->e); + + /* Submit any pending write BHs */ + up_write(&s->lock); + + flush_buffers(pe->snapshot_bhs); + DMDEBUG("Exception completed successfully."); + + /* Notify any interested parties */ + if (s->store.percent_full) { + int pc = s->store.percent_full(&s->store); + + if (pc >= s->last_percent + WAKE_UP_PERCENT) { + dm_table_event(s->table); + s->last_percent = pc - pc % WAKE_UP_PERCENT; + } + } + + } else { + /* Read/write error - snapshot is unusable */ + DMERR("Error reading/writing snapshot"); + + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + remove_exception(&pe->e); + up_write(&s->lock); + + error_buffers(pe->snapshot_bhs); + + dm_table_event(s->table); + DMDEBUG("Exception failed."); + } + + if (list_empty(&pe->siblings)) + flush_buffers(pe->origin_bhs); + else + list_del(&pe->siblings); + + free_pending_exception(pe); +} + +static void commit_callback(void *context, int success) +{ + struct pending_exception *pe = (struct pending_exception *) context; + pending_complete(pe, success); +} + +/* + * Called when the copy I/O has finished. kcopyd actually runs + * this code so don't block. + */ +static void copy_callback(int err, void *context) +{ + struct pending_exception *pe = (struct pending_exception *) context; + struct dm_snapshot *s = pe->snap; + + if (err) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store.commit_exception(&s->store, &pe->e, commit_callback, + pe); +} + +/* + * Dispatches the copy operation to kcopyd. 
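+ * (Editor's note, not part of the original patch.)  The source is the
+ * chunk_size-sector region of the origin starting at
+ * chunk_to_sector(s, old_chunk); the destination is the matching
+ * region of the COW device at new_chunk.  The pe->started flag makes
+ * the dispatch idempotent, which matters because the same pending
+ * exception can be reached from both snapshot_map() and
+ * __origin_write().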
+ */ +static inline void start_copy(struct pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + struct kcopyd_region src, dest; + + src.dev = s->origin->dev; + src.sector = chunk_to_sector(s, pe->e.old_chunk); + src.count = s->chunk_size; + + dest.dev = s->cow->dev; + dest.sector = chunk_to_sector(s, pe->e.new_chunk); + dest.count = s->chunk_size; + + if (!pe->started) { + /* Hand over to kcopyd */ + kcopyd_copy(&src, &dest, copy_callback, pe); + pe->started = 1; + } +} + +/* + * Looks to see if this snapshot already has a pending exception + * for this chunk, otherwise it allocates a new one and inserts + * it into the pending table. + */ +static struct pending_exception *find_pending_exception(struct dm_snapshot *s, + struct buffer_head *bh) +{ + struct exception *e; + struct pending_exception *pe; + chunk_t chunk = sector_to_chunk(s, bh->b_rsector); + + /* + * Is there a pending exception for this already ? + */ + e = lookup_exception(&s->pending, chunk); + if (e) { + /* cast the exception to a pending exception */ + pe = list_entry(e, struct pending_exception, e); + + } else { + /* Create a new pending exception */ + pe = alloc_pending_exception(); + if (!pe) { + DMWARN("Couldn't allocate pending exception."); + return NULL; + } + + pe->e.old_chunk = chunk; + pe->origin_bhs = pe->snapshot_bhs = NULL; + INIT_LIST_HEAD(&pe->siblings); + pe->snap = s; + pe->started = 0; + + if (s->store.prepare_exception(&s->store, &pe->e)) { + free_pending_exception(pe); + s->valid = 0; + return NULL; + } + + insert_exception(&s->pending, &pe->e); + } + + return pe; +} + +static inline void remap_exception(struct dm_snapshot *s, struct exception *e, + struct buffer_head *bh) +{ + bh->b_rdev = s->cow->dev; + bh->b_rsector = chunk_to_sector(s, e->new_chunk) + + (bh->b_rsector & s->chunk_mask); +} + +static int snapshot_map(struct buffer_head *bh, int rw, void *context) +{ + struct exception *e; + struct dm_snapshot *s = (struct dm_snapshot *) context; + int r = 1; + chunk_t chunk; + struct pending_exception *pe; + + chunk = sector_to_chunk(s, bh->b_rsector); + + /* Full snapshots are not usable */ + if (!s->valid) + return -1; + + /* + * Write to snapshot - higher level takes care of RW/RO + * flags so we should only get this if we are + * writeable. + */ + if (rw == WRITE) { + + down_write(&s->lock); + + /* If the block is already remapped - use that, else remap it */ + e = lookup_exception(&s->complete, chunk); + if (e) + remap_exception(s, e, bh); + + else { + pe = find_pending_exception(s, bh); + + if (!pe) { + s->store.drop_snapshot(&s->store); + s->valid = 0; + } + + queue_buffer(&pe->snapshot_bhs, bh); + start_copy(pe); + r = 0; + } + + up_write(&s->lock); + + } else { + /* + * FIXME: this read path scares me because we + * always use the origin when we have a pending + * exception. However I can't think of a + * situation where this is wrong - ejt. 
+ */ + + /* Do reads */ + down_read(&s->lock); + + /* See if it it has been remapped */ + e = lookup_exception(&s->complete, chunk); + if (e) + remap_exception(s, e, bh); + else + bh->b_rdev = s->origin->dev; + + up_read(&s->lock); + } + + return r; +} + +static void list_merge(struct list_head *l1, struct list_head *l2) +{ + struct list_head *l1_n, *l2_p; + + l1_n = l1->next; + l2_p = l2->prev; + + l1->next = l2; + l2->prev = l1; + + l2_p->next = l1_n; + l1_n->prev = l2_p; +} + +static int __origin_write(struct list_head *snapshots, struct buffer_head *bh) +{ + int r = 1; + struct list_head *sl; + struct dm_snapshot *snap; + struct exception *e; + struct pending_exception *pe, *last = NULL; + chunk_t chunk; + + /* Do all the snapshots on this origin */ + list_for_each(sl, snapshots) { + snap = list_entry(sl, struct dm_snapshot, list); + + /* Only deal with valid snapshots */ + if (!snap->valid) + continue; + + down_write(&snap->lock); + + /* + * Remember, different snapshots can have + * different chunk sizes. + */ + chunk = sector_to_chunk(snap, bh->b_rsector); + + /* + * Check exception table to see if block + * is already remapped in this snapshot + * and trigger an exception if not. + */ + e = lookup_exception(&snap->complete, chunk); + if (!e) { + pe = find_pending_exception(snap, bh); + if (!pe) { + snap->store.drop_snapshot(&snap->store); + snap->valid = 0; + + } else { + if (last) + list_merge(&pe->siblings, + &last->siblings); + + last = pe; + r = 0; + } + } + + up_write(&snap->lock); + } + + /* + * Now that we have a complete pe list we can start the copying. + */ + if (last) { + pe = last; + do { + down_write(&pe->snap->lock); + queue_buffer(&pe->origin_bhs, bh); + start_copy(pe); + up_write(&pe->snap->lock); + pe = list_entry(pe->siblings.next, + struct pending_exception, siblings); + + } while (pe != last); + } + + return r; +} + +static int snapshot_status(status_type_t type, char *result, + int maxlen, void *context) +{ + struct dm_snapshot *snap = (struct dm_snapshot *) context; + char cow[16]; + char org[16]; + + switch (type) { + case STATUSTYPE_INFO: + if (!snap->valid) + snprintf(result, maxlen, "Invalid"); + else { + if (snap->store.percent_full) + snprintf(result, maxlen, "%d%%", + snap->store.percent_full(&snap-> + store)); + else + snprintf(result, maxlen, "Unknown"); + } + break; + + case STATUSTYPE_TABLE: + /* + * kdevname returns a static pointer so we need + * to make private copies if the output is to + * make sense. + */ + strncpy(cow, kdevname(snap->cow->dev), sizeof(cow)); + strncpy(org, kdevname(snap->origin->dev), sizeof(org)); + snprintf(result, maxlen, "%s %s %c %ld", org, cow, + snap->type, snap->chunk_size); + break; + } + + return 0; +} + +/* + * Called on a write from the origin driver. + */ +int do_origin(struct dm_dev *origin, struct buffer_head *bh) +{ + struct origin *o; + int r; + + down_read(&_origins_lock); + o = __lookup_origin(origin->dev); + if (!o) + BUG(); + + r = __origin_write(&o->snapshots, bh); + up_read(&_origins_lock); + + return r; +} + +/* + * Origin: maps a linear range of a device, with hooks for snapshotting. + */ + +/* + * Construct an origin mapping: + * The context for an origin is merely a 'struct dm_dev *' + * pointing to the real device. 
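+ * (Editor's note, not part of the original patch.)  origin_map()
+ * always points the buffer at that underlying device; only WRITEs are
+ * handed to do_origin(), which walks every snapshot registered
+ * against the device and raises copy-on-write exceptions before the
+ * write is released.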
+ */ +static int origin_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + int r; + struct dm_dev *dev; + + if (argc != 1) { + *context = "dm-origin: incorrect number of arguments"; + return -EINVAL; + } + + r = dm_table_get_device(t, argv[0], 0, l, t->mode, &dev); + if (r) { + *context = "Cannot get target device"; + return r; + } + + *context = dev; + + return 0; +} + +static void origin_dtr(struct dm_table *t, void *c) +{ + struct dm_dev *dev = (struct dm_dev *) c; + dm_table_put_device(t, dev); +} + +static int origin_map(struct buffer_head *bh, int rw, void *context) +{ + struct dm_dev *dev = (struct dm_dev *) context; + bh->b_rdev = dev->dev; + + /* Only tell snapshots if this is a write */ + return (rw == WRITE) ? do_origin(dev, bh) : 1; +} + +static int origin_status(status_type_t type, char *result, + int maxlen, void *context) +{ + struct dm_dev *dev = (struct dm_dev *) context; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + snprintf(result, maxlen, "%s", kdevname(dev->dev)); + break; + } + + return 0; +} + +static struct target_type origin_target = { + name: "snapshot-origin", + module: THIS_MODULE, + ctr: origin_ctr, + dtr: origin_dtr, + map: origin_map, + status: origin_status, + err: NULL +}; + +static struct target_type snapshot_target = { + name: "snapshot", + module: THIS_MODULE, + ctr: snapshot_ctr, + dtr: snapshot_dtr, + map: snapshot_map, + status: snapshot_status, + err: NULL +}; + +int __init dm_snapshot_init(void) +{ + int r; + + r = dm_register_target(&snapshot_target); + if (r) { + DMERR("snapshot target register failed %d", r); + return r; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Device mapper: Origin: register failed %d\n", r); + goto bad1; + } + + r = init_origin_hash(); + if (r) { + DMERR("init_origin_hash failed."); + goto bad2; + } + + exception_cache = kmem_cache_create("dm-snapshot-ex", + sizeof(struct exception), + __alignof__(struct exception), + 0, NULL, NULL); + if (!exception_cache) { + DMERR("Couldn't create exception cache."); + r = -ENOMEM; + goto bad3; + } + + pending_cache = + kmem_cache_create("dm-snapshot-in", + sizeof(struct pending_exception), + __alignof__(struct pending_exception), + 0, NULL, NULL); + if (!pending_cache) { + DMERR("Couldn't create pending cache."); + r = -ENOMEM; + goto bad4; + } + + pending_pool = mempool_create(128, mempool_alloc_slab, + mempool_free_slab, pending_cache); + if (!pending_pool) { + DMERR("Couldn't create pending pool."); + r = -ENOMEM; + goto bad5; + } + + return 0; + + bad5: + kmem_cache_destroy(pending_cache); + bad4: + kmem_cache_destroy(exception_cache); + bad3: + exit_origin_hash(); + bad2: + dm_unregister_target(&origin_target); + bad1: + dm_unregister_target(&snapshot_target); + return r; +} + +void dm_snapshot_exit(void) +{ + int r; + + r = dm_unregister_target(&snapshot_target); + if (r) + DMERR("snapshot unregister failed %d", r); + + r = dm_unregister_target(&origin_target); + if (r) + DMERR("origin unregister failed %d", r); + + exit_origin_hash(); + mempool_destroy(pending_pool); + kmem_cache_destroy(pending_cache); + kmem_cache_destroy(exception_cache); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-snapshot.h linux.21pre4-ac2/drivers/md/dm-snapshot.h --- linux.21pre4/drivers/md/dm-snapshot.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-snapshot.h 2003-02-04 15:54:23.000000000 +0000 @@ -0,0 +1,147 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#ifndef DM_SNAPSHOT_H +#define DM_SNAPSHOT_H + +#include "dm.h" +#include + +struct exception_table { + uint32_t hash_mask; + struct list_head *table; +}; + +/* + * The snapshot code deals with largish chunks of the disk at a + * time. Typically 64k - 256k. + */ +/* FIXME: can we get away with limiting these to a uint32_t ? */ +typedef offset_t chunk_t; + +/* + * An exception is used where an old chunk of data has been + * replaced by a new one. + */ +struct exception { + struct list_head hash_list; + + chunk_t old_chunk; + chunk_t new_chunk; +}; + +/* + * Abstraction to handle the meta/layout of exception stores (the + * COW device). + */ +struct exception_store { + + /* + * Destroys this object when you've finished with it. + */ + void (*destroy) (struct exception_store *store); + + /* + * Find somewhere to store the next exception. + */ + int (*prepare_exception) (struct exception_store *store, + struct exception *e); + + /* + * Update the metadata with this exception. + */ + void (*commit_exception) (struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context); + + /* + * The snapshot is invalid, note this in the metadata. + */ + void (*drop_snapshot) (struct exception_store *store); + + /* + * Return the %age full of the snapshot + */ + int (*percent_full) (struct exception_store *store); + + struct dm_snapshot *snap; + void *context; +}; + +struct dm_snapshot { + struct rw_semaphore lock; + struct dm_table *table; + + struct dm_dev *origin; + struct dm_dev *cow; + + /* List of snapshots per Origin */ + struct list_head list; + + /* Size of data blocks saved - must be a power of 2 */ + chunk_t chunk_size; + chunk_t chunk_mask; + chunk_t chunk_shift; + + /* You can't use a snapshot if this is 0 (e.g. if full) */ + int valid; + + /* Used for display of table */ + char type; + + /* The last percentage we notified */ + int last_percent; + + struct exception_table pending; + struct exception_table complete; + + /* The on disk metadata handler */ + struct exception_store store; +}; + +/* + * Used by the exception stores to load exceptions hen + * initialising. + */ +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new); + +/* + * Constructor and destructor for the default persistent + * store. + */ +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size); + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize, void **error); + +/* + * Return the number of sectors in the device. 
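+ *
+ * blk_size[] holds device sizes in 1K units, hence the shift left by
+ * one to convert to 512-byte sectors (e.g. a 4096K device reports
+ * 8192 sectors).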
+ */ +static inline offset_t get_dev_size(kdev_t dev) +{ + int *sizes; + + sizes = blk_size[MAJOR(dev)]; + if (sizes) + return sizes[MINOR(dev)] << 1; + + return 0; +} + +static inline chunk_t sector_to_chunk(struct dm_snapshot *s, offset_t sector) +{ + return (sector & ~s->chunk_mask) >> s->chunk_shift; +} + +static inline offset_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk) +{ + return chunk << s->chunk_shift; +} + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-stripe.c linux.21pre4-ac2/drivers/md/dm-stripe.c --- linux.21pre4/drivers/md/dm-stripe.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-stripe.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,234 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +struct stripe { + struct dm_dev *dev; + offset_t physical_start; +}; + +struct stripe_c { + offset_t logical_start; + uint32_t stripes; + + /* The size of this target / num. stripes */ + uint32_t stripe_width; + + /* stripe chunk size */ + uint32_t chunk_shift; + offset_t chunk_mask; + + struct stripe stripe[0]; +}; + +static inline struct stripe_c *alloc_context(int stripes) +{ + size_t len; + + if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), + stripes)) + return NULL; + + len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); + + return kmalloc(len, GFP_KERNEL); +} + +/* + * Parse a single pair + */ +static int get_stripe(struct dm_table *t, struct stripe_c *sc, + int stripe, char **argv) +{ + char *end; + unsigned long start; + + start = simple_strtoul(argv[1], &end, 10); + if (*end) + return -EINVAL; + + if (dm_table_get_device(t, argv[0], start, sc->stripe_width, + t->mode, &sc->stripe[stripe].dev)) + return -ENXIO; + + sc->stripe[stripe].physical_start = start; + return 0; +} + +/* + * Construct a striped mapping. + * [ ]+ + */ +static int stripe_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context) +{ + struct stripe_c *sc; + uint32_t stripes; + uint32_t chunk_size; + char *end; + int r, i; + + if (argc < 2) { + *context = "dm-stripe: Not enough arguments"; + return -EINVAL; + } + + stripes = simple_strtoul(argv[0], &end, 10); + if (*end) { + *context = "dm-stripe: Invalid stripe count"; + return -EINVAL; + } + + chunk_size = simple_strtoul(argv[1], &end, 10); + if (*end) { + *context = "dm-stripe: Invalid chunk_size"; + return -EINVAL; + } + + if (l % stripes) { + *context = "dm-stripe: Target length not divisable by " + "number of stripes"; + return -EINVAL; + } + + sc = alloc_context(stripes); + if (!sc) { + *context = "dm-stripe: Memory allocation for striped context " + "failed"; + return -ENOMEM; + } + + sc->logical_start = b; + sc->stripes = stripes; + sc->stripe_width = l / stripes; + + /* + * chunk_size is a power of two + */ + if (!chunk_size || (chunk_size & (chunk_size - 1))) { + *context = "dm-stripe: Invalid chunk size"; + kfree(sc); + return -EINVAL; + } + + sc->chunk_mask = chunk_size - 1; + for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) + chunk_size >>= 1; + sc->chunk_shift--; + + /* + * Get the stripe destinations. 
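+	 *
+	 * For reference, a complete table line for this target might look
+	 * like the following sketch (two stripes, 64-sector chunks, each
+	 * destination given as a <dev> <start sector> pair):
+	 *
+	 *	0 1024000 striped 2 64 /dev/hda1 0 /dev/hdb1 0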
+ */ + for (i = 0; i < stripes; i++) { + if (argc < 2) { + *context = "dm-stripe: Not enough destinations " + "specified"; + kfree(sc); + return -EINVAL; + } + + argv += 2; + + r = get_stripe(t, sc, i, argv); + if (r < 0) { + *context = "dm-stripe: Couldn't parse stripe " + "destination"; + while (i--) + dm_table_put_device(t, sc->stripe[i].dev); + kfree(sc); + return r; + } + } + + *context = sc; + return 0; +} + +static void stripe_dtr(struct dm_table *t, void *c) +{ + unsigned int i; + struct stripe_c *sc = (struct stripe_c *) c; + + for (i = 0; i < sc->stripes; i++) + dm_table_put_device(t, sc->stripe[i].dev); + + kfree(sc); +} + +static int stripe_map(struct buffer_head *bh, int rw, void *context) +{ + struct stripe_c *sc = (struct stripe_c *) context; + + offset_t offset = bh->b_rsector - sc->logical_start; + uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift); + uint32_t stripe = chunk % sc->stripes; /* 32bit modulus */ + chunk = chunk / sc->stripes; + + bh->b_rdev = sc->stripe[stripe].dev->dev; + bh->b_rsector = sc->stripe[stripe].physical_start + + (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); + return 1; +} + +static int stripe_status(status_type_t type, char *result, int maxlen, + void *context) +{ + struct stripe_c *sc = (struct stripe_c *) context; + int offset; + int i; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + offset = snprintf(result, maxlen, "%d %ld", + sc->stripes, sc->chunk_mask + 1); + for (i = 0; i < sc->stripes; i++) { + offset += + snprintf(result + offset, maxlen - offset, + " %s %ld", + kdevname(sc->stripe[i].dev->dev), + sc->stripe[i].physical_start); + } + break; + } + return 0; +} + +static struct target_type stripe_target = { + name: "striped", + module: THIS_MODULE, + ctr: stripe_ctr, + dtr: stripe_dtr, + map: stripe_map, + status: stripe_status, +}; + +int __init dm_stripe_init(void) +{ + int r; + + r = dm_register_target(&stripe_target); + if (r < 0) + DMWARN("striped target registration failed"); + + return r; +} + +void dm_stripe_exit(void) +{ + if (dm_unregister_target(&stripe_target)) + DMWARN("striped target unregistration failed"); + + return; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-table.c linux.21pre4-ac2/drivers/md/dm-table.c --- linux.21pre4/drivers/md/dm-table.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-table.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,452 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include + +/* ceiling(n / size) * size */ +static inline unsigned long round_up(unsigned long n, unsigned long size) +{ + unsigned long r = n % size; + return n + (r ? (size - r) : 0); +} + +/* ceiling(n / size) */ +static inline unsigned long div_up(unsigned long n, unsigned long size) +{ + return round_up(n, size) / size; +} + +/* similar to ceiling(log_size(n)) */ +static uint int_log(unsigned long n, unsigned long base) +{ + int result = 0; + + while (n > 1) { + n = div_up(n, base); + result++; + } + + return result; +} + +/* + * return the highest key that you could lookup + * from the n'th node on level l of the btree. 
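+ *
+ * The finished index is then searched top-down, roughly like this
+ * (sketch only - the real lookup lives in dm.c):
+ *
+ *	for (l = 0, n = 0, k = 0; l < t->depth; l++) {
+ *		n = get_child(n, k);
+ *		node = get_node(t, l, n);
+ *		for (k = 0; k < KEYS_PER_NODE; k++)
+ *			if (node[k] >= sector)
+ *				break;
+ *	}
+ *	target = &t->targets[(KEYS_PER_NODE * n) + k];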
+ */ +static offset_t high(struct dm_table *t, int l, int n) +{ + for (; l < t->depth - 1; l++) + n = get_child(n, CHILDREN_PER_NODE - 1); + + if (n >= t->counts[l]) + return (offset_t) - 1; + + return get_node(t, l, n)[KEYS_PER_NODE - 1]; +} + +/* + * fills in a level of the btree based on the + * highs of the level below it. + */ +static int setup_btree_index(int l, struct dm_table *t) +{ + int n, k; + offset_t *node; + + for (n = 0; n < t->counts[l]; n++) { + node = get_node(t, l, n); + + for (k = 0; k < KEYS_PER_NODE; k++) + node[k] = high(t, l + 1, get_child(n, k)); + } + + return 0; +} + +/* + * highs, and targets are managed as dynamic + * arrays during a table load. + */ +static int alloc_targets(struct dm_table *t, int num) +{ + offset_t *n_highs; + struct target *n_targets; + int n = t->num_targets; + + /* + * Allocate both the target array and offset array at once. + */ + n_highs = (offset_t *) vcalloc(sizeof(struct target) + sizeof(offset_t), + num); + if (!n_highs) + return -ENOMEM; + + n_targets = (struct target *) (n_highs + num); + + if (n) { + memcpy(n_highs, t->highs, sizeof(*n_highs) * n); + memcpy(n_targets, t->targets, sizeof(*n_targets) * n); + } + + memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); + if (t->highs) + vfree(t->highs); + + t->num_allocated = num; + t->highs = n_highs; + t->targets = n_targets; + + return 0; +} + +int dm_table_create(struct dm_table **result, int mode) +{ + struct dm_table *t = kmalloc(sizeof(*t), GFP_NOIO); + + if (!t) + return -ENOMEM; + + memset(t, 0, sizeof(*t)); + INIT_LIST_HEAD(&t->devices); + + /* allocate a single node's worth of targets to begin with */ + if (alloc_targets(t, KEYS_PER_NODE)) { + kfree(t); + t = NULL; + return -ENOMEM; + } + + init_waitqueue_head(&t->eventq); + t->mode = mode; + *result = t; + return 0; +} + +static void free_devices(struct list_head *devices) +{ + struct list_head *tmp, *next; + + for (tmp = devices->next; tmp != devices; tmp = next) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + next = tmp->next; + kfree(dd); + } +} + +void dm_table_destroy(struct dm_table *t) +{ + int i; + + /* destroying the table counts as an event */ + dm_table_event(t); + + /* free the indexes (see dm_table_complete) */ + if (t->depth >= 2) + vfree(t->index[t->depth - 2]); + + /* free the targets */ + for (i = 0; i < t->num_targets; i++) { + struct target *tgt = &t->targets[i]; + + dm_put_target_type(t->targets[i].type); + + if (tgt->type->dtr) + tgt->type->dtr(t, tgt->private); + } + + vfree(t->highs); + + /* free the device list */ + if (t->devices.next != &t->devices) { + DMWARN("devices still present during destroy: " + "dm_table_remove_device calls missing"); + + free_devices(&t->devices); + } + + kfree(t); +} + +/* + * Checks to see if we need to extend highs or targets. + */ +static inline int check_space(struct dm_table *t) +{ + if (t->num_targets >= t->num_allocated) + return alloc_targets(t, t->num_allocated * 2); + + return 0; +} + +/* + * Convert a device path to a kdev_t. + */ +int lookup_device(const char *path, kdev_t *dev) +{ + int r; + struct nameidata nd; + struct inode *inode; + + if (!path_init(path, LOOKUP_FOLLOW, &nd)) + return 0; + + if ((r = path_walk(path, &nd))) + goto bad; + + inode = nd.dentry->d_inode; + if (!inode) { + r = -ENOENT; + goto bad; + } + + if (!S_ISBLK(inode->i_mode)) { + r = -EINVAL; + goto bad; + } + + *dev = inode->i_rdev; + + bad: + path_release(&nd); + return r; +} + +/* + * See if we've already got a device in the list. 
+ */ +static struct dm_dev *find_device(struct list_head *l, kdev_t dev) +{ + struct list_head *tmp; + + list_for_each(tmp, l) { + struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); + if (dd->dev == dev) + return dd; + } + + return NULL; +} + +/* + * Open a device so we can use it as a map destination. + */ +static int open_dev(struct dm_dev *d) +{ + int err; + + if (d->bd) + BUG(); + + if (!(d->bd = bdget(kdev_t_to_nr(d->dev)))) + return -ENOMEM; + + if ((err = blkdev_get(d->bd, d->mode, 0, BDEV_FILE))) + return err; + + return 0; +} + +/* + * Close a device that we've been using. + */ +static void close_dev(struct dm_dev *d) +{ + if (!d->bd) + return; + + blkdev_put(d->bd, BDEV_FILE); + d->bd = NULL; +} + +/* + * If possible (ie. blk_size[major] is set), this + * checks an area of a destination device is + * valid. + */ +static int check_device_area(kdev_t dev, offset_t start, offset_t len) +{ + int *sizes; + offset_t dev_size; + + if (!(sizes = blk_size[MAJOR(dev)]) || !(dev_size = sizes[MINOR(dev)])) + /* we don't know the device details, + * so give the benefit of the doubt */ + return 1; + + /* convert to 512-byte sectors */ + dev_size <<= 1; + + return ((start < dev_size) && (len <= (dev_size - start))); +} + +/* + * This upgrades the mode on an already open dm_dev. Being + * careful to leave things as they were if we fail to reopen the + * device. + */ +static int upgrade_mode(struct dm_dev *dd, int new_mode) +{ + int r; + struct dm_dev dd_copy; + + memcpy(&dd_copy, dd, sizeof(dd_copy)); + + dd->mode |= new_mode; + dd->bd = NULL; + r = open_dev(dd); + if (!r) + close_dev(&dd_copy); + else + memcpy(dd, &dd_copy, sizeof(dd_copy)); + + return r; +} + +/* + * Add a device to the list, or just increment the usage count + * if it's already present. + */ +int dm_table_get_device(struct dm_table *t, const char *path, + offset_t start, offset_t len, int mode, + struct dm_dev **result) +{ + int r; + kdev_t dev; + struct dm_dev *dd; + int major, minor; + + if (sscanf(path, "%x:%x", &major, &minor) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + } else { + /* convert the path to a device */ + if ((r = lookup_device(path, &dev))) + return r; + } + + dd = find_device(&t->devices, dev); + if (!dd) { + dd = kmalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + dd->mode = mode; + dd->dev = dev; + dd->bd = NULL; + + if ((r = open_dev(dd))) { + kfree(dd); + return r; + } + + atomic_set(&dd->count, 0); + list_add(&dd->list, &t->devices); + + } else if (dd->mode != (mode | dd->mode)) { + r = upgrade_mode(dd, mode); + if (r) + return r; + } + atomic_inc(&dd->count); + + if (!check_device_area(dd->dev, start, len)) { + DMWARN("device %s too small for target", path); + dm_table_put_device(t, dd); + return -EINVAL; + } + + *result = dd; + + return 0; +} + +/* + * Decrement a devices use count and remove it if neccessary. 
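+ *
+ * Every successful dm_table_get_device() should be balanced by one of
+ * these, normally from the target's dtr; a sketch with made-up names:
+ *
+ *	static void my_dtr(struct dm_table *t, void *c)
+ *	{
+ *		struct my_context *mc = c;
+ *
+ *		dm_table_put_device(t, mc->dev);
+ *		kfree(mc);
+ *	}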
+ */ +void dm_table_put_device(struct dm_table *t, struct dm_dev *dd) +{ + if (atomic_dec_and_test(&dd->count)) { + close_dev(dd); + list_del(&dd->list); + kfree(dd); + } +} + +/* + * Adds a target to the map + */ +int dm_table_add_target(struct dm_table *t, offset_t highs, + struct target_type *type, void *private) +{ + int r, n; + + if ((r = check_space(t))) + return r; + + n = t->num_targets++; + t->highs[n] = highs; + t->targets[n].type = type; + t->targets[n].private = private; + + return 0; +} + +static int setup_indexes(struct dm_table *t) +{ + int i, total = 0; + offset_t *indexes; + + /* allocate the space for *all* the indexes */ + for (i = t->depth - 2; i >= 0; i--) { + t->counts[i] = div_up(t->counts[i + 1], CHILDREN_PER_NODE); + total += t->counts[i]; + } + + indexes = (offset_t *) vcalloc(total, (unsigned long) NODE_SIZE); + if (!indexes) + return -ENOMEM; + + /* set up internal nodes, bottom-up */ + for (i = t->depth - 2, total = 0; i >= 0; i--) { + t->index[i] = indexes; + indexes += (KEYS_PER_NODE * t->counts[i]); + setup_btree_index(i, t); + } + + return 0; +} + +/* + * Builds the btree to index the map + */ +int dm_table_complete(struct dm_table *t) +{ + int leaf_nodes, r = 0; + + /* how many indexes will the btree have ? */ + leaf_nodes = div_up(t->num_targets, KEYS_PER_NODE); + t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); + + /* leaf layer has already been set up */ + t->counts[t->depth - 1] = leaf_nodes; + t->index[t->depth - 1] = t->highs; + + if (t->depth >= 2) + r = setup_indexes(t); + + return r; +} + +void dm_table_event(struct dm_table *t) +{ + wake_up_interruptible(&t->eventq); +} + +EXPORT_SYMBOL(dm_table_get_device); +EXPORT_SYMBOL(dm_table_put_device); +EXPORT_SYMBOL(dm_table_event); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/dm-target.c linux.21pre4-ac2/drivers/md/dm-target.c --- linux.21pre4/drivers/md/dm-target.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/dm-target.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include + +struct tt_internal { + struct target_type tt; + + struct list_head list; + long use; +}; + +static LIST_HEAD(_targets); +static rwlock_t _lock = RW_LOCK_UNLOCKED; + +#define DM_MOD_NAME_SIZE 32 + +/* + * Destructively splits up the argument list to pass to ctr. + */ +int split_args(int max, int *argc, char **argv, char *input) +{ + char *start, *end = input, *out; + *argc = 0; + + while (1) { + start = end; + + /* Skip whitespace */ + while (*start && isspace(*start)) + start++; + + if (!*start) + break; /* success, we hit the end */ + + /* 'out' is used to remove any back-quotes */ + end = out = start; + while (*end) { + /* Everything apart from '\0' can be quoted */ + if (*end == '\\' && *(end + 1)) { + *out++ = *(end + 1); + end += 2; + continue; + } + + if (isspace(*end)) + break; /* end of token */ + + *out++ = *end++; + } + + /* have we already filled the array ? 
*/ + if ((*argc + 1) > max) + return -EINVAL; + + /* we know this is whitespace */ + if (*end) + end++; + + /* terminate the string and put it in the array */ + *out = '\0'; + argv[*argc] = start; + (*argc)++; + } + + return 0; +} + +static inline struct tt_internal *__find_target_type(const char *name) +{ + struct list_head *tih; + struct tt_internal *ti; + + list_for_each(tih, &_targets) { + ti = list_entry(tih, struct tt_internal, list); + + if (!strcmp(name, ti->tt.name)) + return ti; + } + + return NULL; +} + +static struct tt_internal *get_target_type(const char *name) +{ + struct tt_internal *ti; + + read_lock(&_lock); + ti = __find_target_type(name); + + if (ti) { + if (ti->use == 0 && ti->tt.module) + __MOD_INC_USE_COUNT(ti->tt.module); + ti->use++; + } + read_unlock(&_lock); + + return ti; +} + +static void load_module(const char *name) +{ + char module_name[DM_MOD_NAME_SIZE] = "dm-"; + + /* Length check for strcat() below */ + if (strlen(name) > (DM_MOD_NAME_SIZE - 4)) + return; + + strcat(module_name, name); + request_module(module_name); + + return; +} + +struct target_type *dm_get_target_type(const char *name) +{ + struct tt_internal *ti = get_target_type(name); + + if (!ti) { + load_module(name); + ti = get_target_type(name); + } + + return ti ? &ti->tt : NULL; +} + +void dm_put_target_type(struct target_type *t) +{ + struct tt_internal *ti = (struct tt_internal *) t; + + read_lock(&_lock); + if (--ti->use == 0 && ti->tt.module) + __MOD_DEC_USE_COUNT(ti->tt.module); + + if (ti->use < 0) + BUG(); + read_unlock(&_lock); + + return; +} + +static struct tt_internal *alloc_target(struct target_type *t) +{ + struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL); + + if (ti) { + memset(ti, 0, sizeof(*ti)); + ti->tt = *t; + } + + return ti; +} + +int dm_register_target(struct target_type *t) +{ + int rv = 0; + struct tt_internal *ti = alloc_target(t); + + if (!ti) + return -ENOMEM; + + write_lock(&_lock); + if (__find_target_type(t->name)) + rv = -EEXIST; + else + list_add(&ti->list, &_targets); + + write_unlock(&_lock); + return rv; +} + +int dm_unregister_target(struct target_type *t) +{ + struct tt_internal *ti; + + write_lock(&_lock); + if (!(ti = __find_target_type(t->name))) { + write_unlock(&_lock); + return -EINVAL; + } + + if (ti->use) { + write_unlock(&_lock); + return -ETXTBSY; + } + + list_del(&ti->list); + kfree(ti); + + write_unlock(&_lock); + return 0; +} + +/* + * io-err: always fails an io, useful for bringing + * up LV's that have holes in them. 
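+ *
+ * The target takes no arguments, so a table line is simply e.g.:
+ *
+ *	0 2048 error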
+ */ +static int io_err_ctr(struct dm_table *t, offset_t b, offset_t l, + int argc, char **args, void **context) +{ + *context = NULL; + return 0; +} + +static void io_err_dtr(struct dm_table *t, void *c) +{ + /* empty */ + return; +} + +static int io_err_map(struct buffer_head *bh, int rw, void *context) +{ + buffer_IO_error(bh); + return 0; +} + +static struct target_type error_target = { + name: "error", + ctr: io_err_ctr, + dtr: io_err_dtr, + map: io_err_map, + status: NULL, +}; + +int dm_target_init(void) +{ + return dm_register_target(&error_target); +} + +void dm_target_exit(void) +{ + if (dm_unregister_target(&error_target)) + DMWARN("error target unregistration failed"); +} + +EXPORT_SYMBOL(dm_register_target); +EXPORT_SYMBOL(dm_unregister_target); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/kcopyd.c linux.21pre4-ac2/drivers/md/kcopyd.c --- linux.21pre4/drivers/md/kcopyd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/kcopyd.c 2003-01-06 17:11:06.000000000 +0000 @@ -0,0 +1,841 @@ +/* + * Copyright (C) 2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcopyd.h" + +/* FIXME: this is only needed for the DMERR macros */ +#include "dm.h" + +/* + * Hard sector size used all over the kernel. + */ +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 + +static void wake_kcopyd(void); + +/*----------------------------------------------------------------- + * We reserve our own pool of preallocated pages that are + * only used for kcopyd io. + *---------------------------------------------------------------*/ + +/* + * FIXME: This should be configurable. + */ +#define NUM_PAGES 512 + +static DECLARE_MUTEX(_pages_lock); +static int _num_free_pages; +static struct page *_pages_array[NUM_PAGES]; +static DECLARE_MUTEX(start_lock); + +static int init_pages(void) +{ + int i; + struct page *p; + + for (i = 0; i < NUM_PAGES; i++) { + p = alloc_page(GFP_KERNEL); + if (!p) + goto bad; + + LockPage(p); + _pages_array[i] = p; + } + + _num_free_pages = NUM_PAGES; + return 0; + + bad: + while (i--) + __free_page(_pages_array[i]); + return -ENOMEM; +} + +static void exit_pages(void) +{ + int i; + struct page *p; + + for (i = 0; i < NUM_PAGES; i++) { + p = _pages_array[i]; + UnlockPage(p); + __free_page(p); + } + + _num_free_pages = 0; +} + +static int kcopyd_get_pages(int num, struct page **result) +{ + int i; + + down(&_pages_lock); + if (_num_free_pages < num) { + up(&_pages_lock); + return -ENOMEM; + } + + for (i = 0; i < num; i++) { + _num_free_pages--; + result[i] = _pages_array[_num_free_pages]; + } + up(&_pages_lock); + + return 0; +} + +static void kcopyd_free_pages(int num, struct page **result) +{ + int i; + + down(&_pages_lock); + for (i = 0; i < num; i++) + _pages_array[_num_free_pages++] = result[i]; + up(&_pages_lock); +} + +/*----------------------------------------------------------------- + * We keep our own private pool of buffer_heads. These are just + * held in a list on the b_reqnext field. + *---------------------------------------------------------------*/ + +/* + * Make sure we have enough buffers to always keep the pages + * occupied. So we assume the worst case scenario where blocks + * are the size of a single sector. 
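+ * (With 4K pages that works out at 512 * 8 = 4096 buffer heads.)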
+ */ +#define NUM_BUFFERS NUM_PAGES * (PAGE_SIZE / SECTOR_SIZE) + +static spinlock_t _buffer_lock = SPIN_LOCK_UNLOCKED; +static struct buffer_head *_all_buffers; +static struct buffer_head *_free_buffers; + +static int init_buffers(void) +{ + int i; + struct buffer_head *buffers; + + buffers = vcalloc(NUM_BUFFERS, sizeof(struct buffer_head)); + if (!buffers) { + DMWARN("Couldn't allocate buffer heads."); + return -ENOMEM; + } + + for (i = 0; i < NUM_BUFFERS; i++) { + if (i < NUM_BUFFERS - 1) + buffers[i].b_reqnext = &buffers[i + 1]; + init_waitqueue_head(&buffers[i].b_wait); + INIT_LIST_HEAD(&buffers[i].b_inode_buffers); + } + + _all_buffers = _free_buffers = buffers; + return 0; +} + +static void exit_buffers(void) +{ + vfree(_all_buffers); +} + +static struct buffer_head *alloc_buffer(void) +{ + struct buffer_head *r; + int flags; + + spin_lock_irqsave(&_buffer_lock, flags); + + if (!_free_buffers) + r = NULL; + else { + r = _free_buffers; + _free_buffers = _free_buffers->b_reqnext; + r->b_reqnext = NULL; + } + + spin_unlock_irqrestore(&_buffer_lock, flags); + + return r; +} + +/* + * Only called from interrupt context. + */ +static void free_buffer(struct buffer_head *bh) +{ + int flags, was_empty; + + spin_lock_irqsave(&_buffer_lock, flags); + was_empty = (_free_buffers == NULL) ? 1 : 0; + bh->b_reqnext = _free_buffers; + _free_buffers = bh; + spin_unlock_irqrestore(&_buffer_lock, flags); + + /* + * If the buffer list was empty then kcopyd probably went + * to sleep because it ran out of buffer heads, so let's + * wake it up. + */ + if (was_empty) + wake_kcopyd(); +} + +/*----------------------------------------------------------------- + * kcopyd_jobs need to be allocated by the *clients* of kcopyd, + * for this reason we use a mempool to prevent the client from + * ever having to do io (which could cause a + * deadlock). + *---------------------------------------------------------------*/ +#define MIN_JOBS NUM_PAGES + +static kmem_cache_t *_job_cache = NULL; +static mempool_t *_job_pool = NULL; + +/* + * We maintain three lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. + * iii) jobs that have completed. + * + * All three of these are protected by job_lock. + */ + +static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED; + +static LIST_HEAD(_complete_jobs); +static LIST_HEAD(_io_jobs); +static LIST_HEAD(_pages_jobs); + +static int init_jobs(void) +{ + INIT_LIST_HEAD(&_complete_jobs); + INIT_LIST_HEAD(&_io_jobs); + INIT_LIST_HEAD(&_pages_jobs); + + _job_cache = kmem_cache_create("kcopyd-jobs", sizeof(struct kcopyd_job), + __alignof__(struct kcopyd_job), + 0, NULL, NULL); + if (!_job_cache) + return -ENOMEM; + + _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, + mempool_free_slab, _job_cache); + if (!_job_pool) { + kmem_cache_destroy(_job_cache); + return -ENOMEM; + } + + return 0; +} + +static void exit_jobs(void) +{ + mempool_destroy(_job_pool); + kmem_cache_destroy(_job_cache); +} + +struct kcopyd_job *kcopyd_alloc_job(void) +{ + struct kcopyd_job *job; + + job = mempool_alloc(_job_pool, GFP_KERNEL); + if (!job) + return NULL; + + memset(job, 0, sizeof(*job)); + return job; +} + +void kcopyd_free_job(struct kcopyd_job *job) +{ + mempool_free(job, _job_pool); +} + +/* + * Functions to push and pop a job onto the head of a given job + * list. 
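+ *
+ * They take _job_lock themselves, so callers simply do, e.g.:
+ *
+ *	job = pop(&_pages_jobs);
+ *	...
+ *	push(&_io_jobs, job);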
+ */ +static inline struct kcopyd_job *pop(struct list_head *jobs) +{ + struct kcopyd_job *job = NULL; + int flags; + + spin_lock_irqsave(&_job_lock, flags); + + if (!list_empty(jobs)) { + job = list_entry(jobs->next, struct kcopyd_job, list); + list_del(&job->list); + } + spin_unlock_irqrestore(&_job_lock, flags); + + return job; +} + +static inline void push(struct list_head *jobs, struct kcopyd_job *job) +{ + int flags; + + spin_lock_irqsave(&_job_lock, flags); + list_add(&job->list, jobs); + spin_unlock_irqrestore(&_job_lock, flags); +} + +/* + * Completion function for one of our buffers. + */ +static void end_bh(struct buffer_head *bh, int uptodate) +{ + struct kcopyd_job *job = bh->b_private; + + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); + + if (!uptodate) + job->err = -EIO; + + /* are we the last ? */ + if (atomic_dec_and_test(&job->nr_incomplete)) { + push(&_complete_jobs, job); + wake_kcopyd(); + } + + free_buffer(bh); +} + +static void dispatch_bh(struct kcopyd_job *job, + struct buffer_head *bh, int block) +{ + int p; + + /* + * Add in the job offset + */ + bh->b_blocknr = (job->disk.sector >> job->block_shift) + block; + + p = block >> job->bpp_shift; + block &= job->bpp_mask; + + bh->b_dev = B_FREE; + bh->b_size = job->block_size; + set_bh_page(bh, job->pages[p], ((block << job->block_shift) + + job->offset) << SECTOR_SHIFT); + bh->b_this_page = bh; + + init_buffer(bh, end_bh, job); + + bh->b_dev = job->disk.dev; + bh->b_state = ((1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req)); + + set_bit(BH_Uptodate, &bh->b_state); + if (job->rw == WRITE) + clear_bit(BH_Dirty, &bh->b_state); + + submit_bh(job->rw, bh); +} + +/* + * These three functions process 1 item from the corresponding + * job list. + * + * They return: + * < 0: error + * 0: success + * > 0: can't process yet. + */ +static int run_complete_job(struct kcopyd_job *job) +{ + job->callback(job); + return 0; +} + +/* + * Request io on as many buffer heads as we can currently get for + * a particular job. + */ +static int run_io_job(struct kcopyd_job *job) +{ + unsigned int block; + struct buffer_head *bh; + + for (block = atomic_read(&job->nr_requested); + block < job->nr_blocks; block++) { + bh = alloc_buffer(); + if (!bh) + break; + + atomic_inc(&job->nr_requested); + dispatch_bh(job, bh, block); + } + + return (block == job->nr_blocks) ? 0 : 1; +} + +static int run_pages_job(struct kcopyd_job *job) +{ + int r; + + job->nr_pages = (job->disk.count + job->offset) / + (PAGE_SIZE / SECTOR_SIZE); + r = kcopyd_get_pages(job->nr_pages, job->pages); + + if (!r) { + /* this job is ready for io */ + push(&_io_jobs, job); + return 0; + } + + if (r == -ENOMEM) + /* can complete now */ + return 1; + + return r; +} + +/* + * Run through a list for as long as possible. Returns the count + * of successful jobs. + */ +static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) +{ + struct kcopyd_job *job; + int r, count = 0; + + while ((job = pop(jobs))) { + + r = fn(job); + + if (r < 0) { + /* error this rogue job */ + job->err = r; + push(&_complete_jobs, job); + break; + } + + if (r > 0) { + /* + * We couldn't service this job ATM, so + * push this job back onto the list. + */ + push(jobs, job); + break; + } + + count++; + } + + return count; +} + +/* + * kcopyd does this every time it's woken up. + */ +static void do_work(void) +{ + int count; + + /* + * We loop round until there is no more work to do. 
+ */ + do { + count = process_jobs(&_complete_jobs, run_complete_job); + count += process_jobs(&_io_jobs, run_io_job); + count += process_jobs(&_pages_jobs, run_pages_job); + + } while (count); + + run_task_queue(&tq_disk); +} + +/*----------------------------------------------------------------- + * The daemon + *---------------------------------------------------------------*/ +static atomic_t _kcopyd_must_die; +static DECLARE_MUTEX(_run_lock); +static DECLARE_WAIT_QUEUE_HEAD(_job_queue); + +static int kcopyd(void *arg) +{ + DECLARE_WAITQUEUE(wq, current); + + daemonize(); + strcpy(current->comm, "kcopyd"); + atomic_set(&_kcopyd_must_die, 0); + + add_wait_queue(&_job_queue, &wq); + + down(&_run_lock); + up(&start_lock); + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + if (atomic_read(&_kcopyd_must_die)) + break; + + do_work(); + schedule(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&_job_queue, &wq); + + up(&_run_lock); + + return 0; +} + +static int start_daemon(void) +{ + static pid_t pid = 0; + + down(&start_lock); + + pid = kernel_thread(kcopyd, NULL, 0); + if (pid <= 0) { + DMERR("Failed to start kcopyd thread"); + return -EAGAIN; + } + + /* + * wait for the daemon to up this mutex. + */ + down(&start_lock); + up(&start_lock); + + return 0; +} + +static int stop_daemon(void) +{ + atomic_set(&_kcopyd_must_die, 1); + wake_kcopyd(); + down(&_run_lock); + up(&_run_lock); + + return 0; +} + +static void wake_kcopyd(void) +{ + wake_up_interruptible(&_job_queue); +} + +static int calc_shift(unsigned int n) +{ + int s; + + for (s = 0; n; s++, n >>= 1) + ; + + return --s; +} + +static void calc_block_sizes(struct kcopyd_job *job) +{ + job->block_size = get_hardsect_size(job->disk.dev); + job->block_shift = calc_shift(job->block_size / SECTOR_SIZE); + job->bpp_shift = PAGE_SHIFT - job->block_shift - SECTOR_SHIFT; + job->bpp_mask = (1 << job->bpp_shift) - 1; + job->nr_blocks = job->disk.count >> job->block_shift; + atomic_set(&job->nr_requested, 0); + atomic_set(&job->nr_incomplete, job->nr_blocks); +} + +int kcopyd_io(struct kcopyd_job *job) +{ + calc_block_sizes(job); + push(job->pages[0] ? &_io_jobs : &_pages_jobs, job); + wake_kcopyd(); + return 0; +} + +/*----------------------------------------------------------------- + * The copier is implemented on top of the simpler async io + * daemon above. 
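+ *
+ * Roughly, a copy is two chained jobs:
+ *
+ *	kcopyd_copy()          issues a READ of 'from'
+ *	  -> copy_write()      reuses the same pages for a WRITE to 'to'
+ *	    -> copy_complete() notifies the client, returns pages and job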
+ *---------------------------------------------------------------*/ +struct copy_info { + kcopyd_notify_fn notify; + void *notify_context; + + struct kcopyd_region to; +}; + +#define MIN_INFOS 128 +static kmem_cache_t *_copy_cache = NULL; +static mempool_t *_copy_pool = NULL; + +static int init_copier(void) +{ + _copy_cache = kmem_cache_create("kcopyd-info", + sizeof(struct copy_info), + __alignof__(struct copy_info), + 0, NULL, NULL); + if (!_copy_cache) + return -ENOMEM; + + _copy_pool = mempool_create(MIN_INFOS, mempool_alloc_slab, + mempool_free_slab, _copy_cache); + if (!_copy_pool) { + kmem_cache_destroy(_copy_cache); + return -ENOMEM; + } + + return 0; +} + +static void exit_copier(void) +{ + if (_copy_pool) + mempool_destroy(_copy_pool); + + if (_copy_cache) + kmem_cache_destroy(_copy_cache); +} + +static inline struct copy_info *alloc_copy_info(void) +{ + return mempool_alloc(_copy_pool, GFP_KERNEL); +} + +static inline void free_copy_info(struct copy_info *info) +{ + mempool_free(info, _copy_pool); +} + +void copy_complete(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + + if (info->notify) + info->notify(job->err, info->notify_context); + + free_copy_info(info); + + kcopyd_free_pages(job->nr_pages, job->pages); + + kcopyd_free_job(job); +} + +static void page_write_complete(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + int i; + + if (info->notify) + info->notify(job->err, info->notify_context); + + free_copy_info(info); + for (i = 0; i < job->nr_pages; i++) + put_page(job->pages[i]); + + kcopyd_free_job(job); +} + +/* + * These callback functions implement the state machine that copies regions. + */ +void copy_write(struct kcopyd_job *job) +{ + struct copy_info *info = (struct copy_info *) job->context; + + if (job->err && info->notify) { + info->notify(job->err, job->context); + kcopyd_free_job(job); + free_copy_info(info); + return; + } + + job->rw = WRITE; + memcpy(&job->disk, &info->to, sizeof(job->disk)); + job->callback = copy_complete; + job->context = info; + + /* + * Queue the write. + */ + kcopyd_io(job); +} + +int kcopyd_write_pages(struct kcopyd_region *to, int nr_pages, + struct page **pages, int offset, kcopyd_notify_fn fn, + void *context) +{ + struct copy_info *info; + struct kcopyd_job *job; + int i; + + /* + * Allocate a new copy_info. + */ + info = alloc_copy_info(); + if (!info) + return -ENOMEM; + + job = kcopyd_alloc_job(); + if (!job) { + free_copy_info(info); + return -ENOMEM; + } + + /* + * set up for the write. + */ + info->notify = fn; + info->notify_context = context; + memcpy(&info->to, to, sizeof(*to)); + + /* Get the pages */ + job->nr_pages = nr_pages; + for (i = 0; i < nr_pages; i++) { + get_page(pages[i]); + job->pages[i] = pages[i]; + } + + job->rw = WRITE; + + memcpy(&job->disk, &info->to, sizeof(job->disk)); + job->offset = offset; + calc_block_sizes(job); + job->callback = page_write_complete; + job->context = info; + + /* + * Trigger job. + */ + kcopyd_io(job); + return 0; +} + +int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to, + kcopyd_notify_fn fn, void *context) +{ + struct copy_info *info; + struct kcopyd_job *job; + + /* + * Allocate a new copy_info. + */ + info = alloc_copy_info(); + if (!info) + return -ENOMEM; + + job = kcopyd_alloc_job(); + if (!job) { + free_copy_info(info); + return -ENOMEM; + } + + /* + * set up for the read. 
+ */ + info->notify = fn; + info->notify_context = context; + memcpy(&info->to, to, sizeof(*to)); + + job->rw = READ; + memcpy(&job->disk, from, sizeof(*from)); + + job->offset = 0; + calc_block_sizes(job); + job->callback = copy_write; + job->context = info; + + /* + * Trigger job. + */ + kcopyd_io(job); + return 0; +} + +/*----------------------------------------------------------------- + * Unit setup + *---------------------------------------------------------------*/ +static struct { + int (*init) (void); + void (*exit) (void); + +} _inits[] = { +#define xx(n) { init_ ## n, exit_ ## n} + xx(pages), + xx(buffers), + xx(jobs), + xx(copier) +#undef xx +}; + +static int _client_count = 0; +static DECLARE_MUTEX(_client_count_sem); + +static int kcopyd_init(void) +{ + const int count = sizeof(_inits) / sizeof(*_inits); + + int r, i; + + for (i = 0; i < count; i++) { + r = _inits[i].init(); + if (r) + goto bad; + } + + start_daemon(); + return 0; + + bad: + while (i--) + _inits[i].exit(); + + return r; +} + +static void kcopyd_exit(void) +{ + int i = sizeof(_inits) / sizeof(*_inits); + + if (stop_daemon()) + DMWARN("Couldn't stop kcopyd."); + + while (i--) + _inits[i].exit(); +} + +void kcopyd_inc_client_count(void) +{ + /* + * What I need here is an atomic_test_and_inc that returns + * the previous value of the atomic... In its absence I lock + * an int with a semaphore. :-( + */ + down(&_client_count_sem); + if (_client_count == 0) + kcopyd_init(); + _client_count++; + + up(&_client_count_sem); +} + +void kcopyd_dec_client_count(void) +{ + down(&_client_count_sem); + if (--_client_count == 0) + kcopyd_exit(); + + up(&_client_count_sem); +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/kcopyd.h linux.21pre4-ac2/drivers/md/kcopyd.h --- linux.21pre4/drivers/md/kcopyd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/md/kcopyd.h 2003-02-04 15:54:23.000000000 +0000 @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2001 Sistina Software + * + * This file is released under the GPL. + */ + +#ifndef DM_KCOPYD_H +#define DM_KCOPYD_H + +/* + * Needed for the definition of offset_t. + */ +#include +#include + +struct kcopyd_region { + kdev_t dev; + offset_t sector; + offset_t count; +}; + +#define MAX_KCOPYD_PAGES 128 + +struct kcopyd_job { + struct list_head list; + + /* + * Error state of the job. + */ + int err; + + /* + * Either READ or WRITE + */ + int rw; + + /* + * The source or destination for the transfer. + */ + struct kcopyd_region disk; + + int nr_pages; + struct page *pages[MAX_KCOPYD_PAGES]; + + /* + * Shifts and masks that will be useful when dispatching + * each buffer_head. + */ + offset_t offset; + offset_t block_size; + offset_t block_shift; + offset_t bpp_shift; /* blocks per page */ + offset_t bpp_mask; + + /* + * nr_blocks is how many buffer heads will have to be + * displatched to service this job, nr_requested is how + * many have been dispatched and nr_complete is how many + * have come back. + */ + unsigned int nr_blocks; + atomic_t nr_requested; + atomic_t nr_incomplete; + + /* + * Set this to ensure you are notified when the job has + * completed. 'context' is for callback to use. + */ + void (*callback)(struct kcopyd_job *job); + void *context; +}; + +/* + * Low level async io routines. + */ +struct kcopyd_job *kcopyd_alloc_job(void); +void kcopyd_free_job(struct kcopyd_job *job); + +int kcopyd_queue_job(struct kcopyd_job *job); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns. 
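+ *
+ * Typical use (sketch modelled on start_copy() in dm-snapshot.c; all
+ * identifiers below are placeholders):
+ *
+ *	struct kcopyd_region src, dest;
+ *
+ *	src.dev = origin_dev;  src.sector = start;   src.count = len;
+ *	dest.dev = cow_dev;    dest.sector = where;  dest.count = len;
+ *	kcopyd_copy(&src, &dest, my_done, my_context);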
+ */ +typedef void (*kcopyd_notify_fn)(int err, void *context); + +int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to, + kcopyd_notify_fn fn, void *context); + +int kcopyd_write_pages(struct kcopyd_region *to, int nr_pages, + struct page **pages, int offset, kcopyd_notify_fn fn, + void *context); + +/* + * We only want kcopyd to reserve resources if someone is + * actually using it. + */ +void kcopyd_inc_client_count(void); +void kcopyd_dec_client_count(void); + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/Makefile linux.21pre4-ac2/drivers/md/Makefile --- linux.21pre4/drivers/md/Makefile 2003-01-29 16:27:04.000000000 +0000 +++ linux.21pre4-ac2/drivers/md/Makefile 2003-01-06 17:11:06.000000000 +0000 @@ -4,9 +4,11 @@ O_TARGET := mddev.o -export-objs := md.o xor.o +export-objs := md.o xor.o dm-table.o dm-target.o kcopyd.o list-multi := lvm-mod.o lvm-mod-objs := lvm.o lvm-snap.o lvm-fs.o +dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \ + dm-ioctl.o dm-snapshot.o dm-exception-store.o kcopyd.o # Note: link order is important. All raid personalities # and xor.o must come before md.o, as they each initialise @@ -20,8 +22,12 @@ obj-$(CONFIG_MD_MULTIPATH) += multipath.o obj-$(CONFIG_BLK_DEV_MD) += md.o obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o +obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o include $(TOPDIR)/Rules.make lvm-mod.o: $(lvm-mod-objs) $(LD) -r -o $@ $(lvm-mod-objs) + +dm-mod.o: $(dm-mod-objs) + $(LD) -r -o $@ $(dm-mod-objs) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/md/md.c linux.21pre4-ac2/drivers/md/md.c --- linux.21pre4/drivers/md/md.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/md/md.c 2003-01-29 17:17:53.000000000 +0000 @@ -77,7 +77,7 @@ */ static int sysctl_speed_limit_min = 100; -static int sysctl_speed_limit_max = 100000; +static int sysctl_speed_limit_max = 10000; static struct ctl_table_header *raid_table_header; @@ -2930,8 +2930,6 @@ * bdflush, otherwise bdflush will deadlock if there are too * many dirty RAID5 blocks. */ - current->policy = SCHED_OTHER; - current->nice = -20; md_unlock_kernel(); complete(thread->event); @@ -3384,11 +3382,6 @@ "(but not more than %d KB/sec) for reconstruction.\n", sysctl_speed_limit_max); - /* - * Resync has low priority. 
- */ - current->nice = 19; - is_mddev_idle(mddev); /* this also initializes IO event counters */ for (m = 0; m < SYNC_MARKS; m++) { mark[m] = jiffies; @@ -3466,16 +3459,13 @@ currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1; if (currspeed > sysctl_speed_limit_min) { - current->nice = 19; - if ((currspeed > sysctl_speed_limit_max) || !is_mddev_idle(mddev)) { current->state = TASK_INTERRUPTIBLE; md_schedule_timeout(HZ/4); goto repeat; } - } else - current->nice = -20; + } } printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev)); err = 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/media/radio/radio-cadet.c linux.21pre4-ac2/drivers/media/radio/radio-cadet.c --- linux.21pre4/drivers/media/radio/radio-cadet.c 2003-01-29 16:27:01.000000000 +0000 +++ linux.21pre4-ac2/drivers/media/radio/radio-cadet.c 2003-02-04 15:01:48.000000000 +0000 @@ -20,7 +20,9 @@ * Removed dead CONFIG_RADIO_CADET_PORT code * PnP detection on load is now default (no args necessary) * -*/ + * 2003-01-31 Alan Cox + * Cleaned up locking, delay code, general odds and ends + */ #include /* Modules */ #include /* Initdata */ @@ -40,11 +42,11 @@ static int curtuner=0; static int tunestat=0; static int sigstrength=0; -static wait_queue_head_t tunerq,rdsq,readq; +static wait_queue_head_t readq; struct timer_list tunertimer,rdstimer,readtimer; static __u8 rdsin=0,rdsout=0,rdsstat=0; static unsigned char rdsbuf[RDS_BUFFER]; -static int cadet_lock=0; +static spinlock_t cadet_io_lock; static int cadet_probe(void); static struct pci_dev *dev; @@ -57,37 +59,19 @@ */ static __u16 sigtable[2][4]={{5,10,30,150},{28,40,63,1000}}; -void cadet_wake(unsigned long qnum) -{ - switch(qnum) { - case 0: /* cadet_setfreq */ - wake_up(&tunerq); - break; - case 1: /* cadet_getrds */ - wake_up(&rdsq); - break; - } -} - - - static int cadet_getrds(void) { int rdsstat=0; - cadet_lock++; + spin_lock(&cadet_io_lock); outb(3,io); /* Select Decoder Control/Status */ outb(inb(io+1)&0x7f,io+1); /* Reset RDS detection */ - cadet_lock--; - init_timer(&rdstimer); - rdstimer.function=cadet_wake; - rdstimer.data=(unsigned long)1; - rdstimer.expires=jiffies+(HZ/10); - init_waitqueue_head(&rdsq); - add_timer(&rdstimer); - sleep_on(&rdsq); + spin_unlock(&cadet_io_lock); - cadet_lock++; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/10); + + spin_lock(&cadet_io_lock); outb(3,io); /* Select Decoder Control/Status */ if((inb(io+1)&0x80)!=0) { rdsstat|=VIDEO_TUNER_RDS_ON; @@ -95,32 +79,24 @@ if((inb(io+1)&0x10)!=0) { rdsstat|=VIDEO_TUNER_MBS_ON; } - cadet_lock--; + spin_unlock(&cadet_io_lock); return rdsstat; + } - - - static int cadet_getstereo(void) { - if(curtuner!=0) { /* Only FM has stereo capability! */ + int ret = 0; + if(curtuner != 0) /* Only FM has stereo capability! 
*/ return 0; - } - cadet_lock++; + spin_lock(&cadet_io_lock); outb(7,io); /* Select tuner control */ - if((inb(io+1)&0x40)==0) { - cadet_lock--; - return 1; /* Stereo pilot detected */ - } - else { - cadet_lock--; - return 0; /* Mono */ - } + if( (inb(io+1) & 0x40) == 0) + ret = 1; + spin_unlock(&cadet_io_lock); + return ret; } - - static unsigned cadet_gettune(void) { int curvol,i; @@ -129,7 +105,9 @@ /* * Prepare for read */ - cadet_lock++; + + spin_lock(&cadet_io_lock); + outb(7,io); /* Select tuner control */ curvol=inb(io+1); /* Save current volume/mute setting */ outb(0x00,io+1); /* Ensure WRITE-ENABLE is LOW */ @@ -151,7 +129,8 @@ * Restore volume/mute setting */ outb(curvol,io+1); - cadet_lock--; + + spin_unlock(&cadet_io_lock); return fifo; } @@ -197,7 +176,8 @@ int i; unsigned test; - cadet_lock++; + spin_lock(&cadet_io_lock); + outb(7,io); /* Select tuner control */ /* * Write the shift register @@ -216,7 +196,7 @@ test=0x1c|((fifo>>23)&0x02); outb(test,io+1); } - cadet_lock--; + spin_unlock(&cadet_io_lock); } @@ -252,92 +232,92 @@ /* * Save current volume/mute setting */ - cadet_lock++; + + spin_lock(&cadet_io_lock); outb(7,io); /* Select tuner control */ curvol=inb(io+1); + spin_unlock(&cadet_io_lock); /* * Tune the card */ for(j=3;j>-1;j--) { cadet_settune(fifo|(j<<16)); + + spin_lock(&cadet_io_lock); outb(7,io); /* Select tuner control */ outb(curvol,io+1); - cadet_lock--; - init_timer(&tunertimer); - tunertimer.function=cadet_wake; - tunertimer.data=(unsigned long)0; - tunertimer.expires=jiffies+(HZ/10); - init_waitqueue_head(&tunerq); - add_timer(&tunertimer); - sleep_on(&tunerq); + spin_unlock(&cadet_io_lock); + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/10); + cadet_gettune(); - if((tunestat&0x40)==0) { /* Tuned */ + if((tunestat & 0x40) == 0) { /* Tuned */ sigstrength=sigtable[curtuner][j]; return; } - cadet_lock++; } - cadet_lock--; sigstrength=0; } static int cadet_getvol(void) { - cadet_lock++; + int ret = 0; + + spin_lock(&cadet_io_lock); + outb(7,io); /* Select tuner control */ - if((inb(io+1)&0x20)!=0) { - cadet_lock--; - return 0xffff; - } - else { - cadet_lock--; - return 0; - } + if((inb(io + 1) & 0x20) != 0) + ret = 0xffff; + + spin_unlock(&cadet_io_lock); + return ret; } static void cadet_setvol(int vol) { - cadet_lock++; + spin_lock(&cadet_io_lock); outb(7,io); /* Select tuner control */ - if(vol>0) { + if(vol>0) outb(0x20,io+1); - } - else { + else outb(0x00,io+1); - } - cadet_lock--; -} + spin_unlock(&cadet_io_lock); +} -void cadet_handler(unsigned long data) +static void cadet_handler(unsigned long data) { /* * Service the RDS fifo */ - if(cadet_lock==0) { + + if(spin_trylock(&cadet_io_lock)) + { outb(0x3,io); /* Select RDS Decoder Control */ if((inb(io+1)&0x20)!=0) { printk(KERN_CRIT "cadet: RDS fifo overflow\n"); } outb(0x80,io); /* Select RDS fifo */ while((inb(io)&0x80)!=0) { - rdsbuf[rdsin++]=inb(io+1); + rdsbuf[rdsin]=inb(io+1); if(rdsin==rdsout) { - printk(KERN_CRIT "cadet: RDS buffer overflow\n"); + printk(KERN_WARNING "cadet: RDS buffer overflow\n"); } + else rdsin++; } + spin_unlock(&cadet_io_lock); } /* * Service pending read */ - if( rdsin!=rdsout) { + if( rdsin!=rdsout) wake_up_interruptible(&readq); - } /* * Clean up and exit @@ -358,10 +338,10 @@ unsigned char readbuf[RDS_BUFFER]; if(rdsstat==0) { - cadet_lock++; + spin_lock(&cadet_io_lock); rdsstat=1; outb(0x80,io); /* Select RDS fifo */ - cadet_lock--; + spin_unlock(&cadet_io_lock); init_timer(&readtimer); readtimer.function=cadet_handler; readtimer.data=(unsigned 
long)0; @@ -369,17 +349,15 @@ add_timer(&readtimer); } if(rdsin==rdsout) { - if(nonblock) { + if(nonblock) return -EWOULDBLOCK; - } interruptible_sleep_on(&readq); } - while((i1)) { - return -EINVAL; - } + switch(v.tuner) { - case 0: + case 0: strcpy(v.name,"FM"); v.rangelow=1400; /* 87.5 MHz */ v.rangehigh=1728; /* 108.0 MHz */ v.flags=0; - v.mode=0; v.mode|=VIDEO_MODE_AUTO; - v.signal=sigstrength; - if(cadet_getstereo()==1) { + if(cadet_getstereo()==1) v.flags|=VIDEO_TUNER_STEREO_ON; - } + v.flags|=cadet_getrds(); - if(copy_to_user(arg,&v, sizeof(v))) { - return -EFAULT; - } break; - case 1: + case 1: strcpy(v.name,"AM"); v.rangelow=8320; /* 520 kHz */ v.rangehigh=26400; /* 1650 kHz */ - v.flags=0; - v.flags|=VIDEO_TUNER_LOW; - v.mode=0; - v.mode|=VIDEO_MODE_AUTO; - v.signal=sigstrength; - if(copy_to_user(arg,&v, sizeof(v))) { - return -EFAULT; - } + v.flags = VIDEO_TUNER_LOW; + v.mode = VIDEO_MODE_AUTO; break; + default: + return -EINVAL; } + v.signal=sigstrength; + if(copy_to_user(arg,&v, sizeof(v))) { + return -EFAULT; + } return 0; } case VIDIOCSTUNER: { struct video_tuner v; - if(copy_from_user(&v, arg, sizeof(v))) { + if(copy_from_user(&v, arg, sizeof(v))) return -EFAULT; - } - if((v.tuner<0)||(v.tuner>1)) { + + if(v.tuner < 0 || v.tuner > 1) return -EINVAL; - } + curtuner=v.tuner; return 0; } @@ -468,25 +434,24 @@ case VIDIOCSFREQ: if(copy_from_user(&freq, arg,sizeof(freq))) return -EFAULT; - if((curtuner==0)&&((freq<1400)||(freq>1728))) { + if(curtuner==0 && (freq<1400 || freq>1728)) return -EINVAL; - } - if((curtuner==1)&&((freq<8320)||(freq>26400))) { + if(curtuner==1 && (freq<8320 || freq>26400)) return -EINVAL; - } + cadet_setfreq(freq); return 0; + case VIDIOCGAUDIO: { struct video_audio v; memset(&v,0, sizeof(v)); v.flags=VIDEO_AUDIO_MUTABLE|VIDEO_AUDIO_VOLUME; - if(cadet_getstereo()==0) { + if(cadet_getstereo()==0) v.mode=VIDEO_SOUND_MONO; - } - else { - v.mode=VIDEO_SOUND_STEREO; - } + else + v.mode=VIDEO_SOUND_STEREO; + v.volume=cadet_getvol(); v.step=0xffff; strcpy(v.name, "Radio"); @@ -525,10 +490,8 @@ static void cadet_close(struct video_device *dev) { - if(rdsstat==1) { - del_timer(&readtimer); - rdsstat=0; - } + del_timer_sync(&readtimer); + rdsstat=0; users--; } @@ -557,13 +520,13 @@ if (!(dev->resource[0].flags & IORESOURCE_IO)) return -ENODEV; if (dev->activate(dev)<0) { - printk ("radio-cadet: isapnp configure failed (out of resources?)\n"); + printk(KERN_ERR "radio-cadet: isapnp configure failed (out of resources?)\n"); return -ENOMEM; } io = dev->resource[0].start; - printk ("radio-cadet: ISAPnP reports card at %#x\n", io); + printk(KERN_INFO "radio-cadet: ISAPnP reports card at %#x\n", io); return io; } @@ -594,6 +557,9 @@ static int __init cadet_init(void) { + + spin_lock_init(&cadet_io_lock); + /* * If a probe was requested then probe ISAPnP first (safest) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/mtd/maps/Config.in linux.21pre4-ac2/drivers/mtd/maps/Config.in --- linux.21pre4/drivers/mtd/maps/Config.in 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/mtd/maps/Config.in 2003-01-29 17:18:17.000000000 +0000 @@ -19,7 +19,7 @@ if [ "$CONFIG_X86" = "y" ]; then dep_tristate ' CFI Flash device mapped on Photron PNC-2000' CONFIG_MTD_PNC2000 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS - dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI + dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI $CONFIG_MTD_CONCAT dep_tristate ' CFI 
Flash device mapped on AMD NetSc520' CONFIG_MTD_NETSC520 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS dep_tristate ' CFI Flash device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/3c523.c linux.21pre4-ac2/drivers/net/3c523.c --- linux.21pre4/drivers/net/3c523.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/3c523.c 2003-01-09 00:47:04.000000000 +0000 @@ -1125,7 +1125,7 @@ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; if(len != skb->len) - memset((char *) p->xmit_cbuffs[p->xmit)count], 0, ETH_ZLEN); + memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN); memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len); #if (NUM_XMIT_BUFFS == 1) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/3c527.c linux.21pre4-ac2/drivers/net/3c527.c --- linux.21pre4/drivers/net/3c527.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/3c527.c 2003-01-06 15:38:18.000000000 +0000 @@ -1086,6 +1086,12 @@ /* We will need this to flush the buffer out */ lp->tx_ring[lp->tx_ring_head].skb=skb; + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + goto out; + } np->length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; np->data = virt_to_bus(skb->data); @@ -1094,7 +1100,7 @@ wmb(); p->control &= ~CONTROL_EOL; /* Clear EOL on p */ - +out: restore_flags(flags); netif_wake_queue(dev); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/3c59x.c linux.21pre4-ac2/drivers/net/3c59x.c --- linux.21pre4/drivers/net/3c59x.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/3c59x.c 2003-01-29 17:18:58.000000000 +0000 @@ -166,6 +166,11 @@ - Rename wait_for_completion() to issue_and_wait() to avoid completion.h clash. + LK1.1.18ac 01Jul02 akpm + - Fix for undocumented transceiver power-up bit on some 3c566B's + (Donald Becker, Rahul Karnik) + + - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details. - Also see Documentation/networking/vortex.txt */ @@ -181,8 +186,8 @@ #define DRV_NAME "3c59x" -#define DRV_VERSION "LK1.1.16" -#define DRV_RELDATE "19 July 2001" +#define DRV_VERSION "LK1.1.18-ac" +#define DRV_RELDATE "1 July 2002" @@ -400,7 +405,7 @@ EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, - EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000 }; + EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000 }; enum vortex_chips { CH_3C590 = 0, @@ -509,7 +514,7 @@ HAS_HWCKSM, 128, }, {"3c556B Laptop Hurricane", PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| - HAS_HWCKSM, 128, }, + WNO_XCVR_PWR|HAS_HWCKSM, 128, }, {"3c575 [Megahertz] 10/100 LAN CardBus", PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, @@ -1190,6 +1195,10 @@ if (vp->drv_flags & INVERT_MII_PWR) n |= 0x4000; outw(n, ioaddr + Wn2_ResetOptions); + if (vp->drv_flags & WNO_XCVR_PWR) { + EL3WINDOW(0); + outw(0x0800, ioaddr); + } } /* Extract our information from the EEPROM data. 
*/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/82596.c linux.21pre4-ac2/drivers/net/82596.c --- linux.21pre4/drivers/net/82596.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/82596.c 2003-01-06 15:38:15.000000000 +0000 @@ -1066,12 +1066,19 @@ struct i596_private *lp = (struct i596_private *) dev->priv; struct tx_cmd *tx_cmd; struct i596_tbd *tbd; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + short length = skb->len; dev->trans_start = jiffies; DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%x) called\n", dev->name, skb->len, (unsigned int)skb->data)); + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } netif_stop_queue(dev); tx_cmd = lp->tx_cmds + lp->next_tx_cmd; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/a2065.c linux.21pre4-ac2/drivers/net/a2065.c --- linux.21pre4/drivers/net/a2065.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/a2065.c 2003-01-12 17:59:12.000000000 +0000 @@ -574,6 +574,15 @@ unsigned long flags; skblen = skb->len; + len = skblen; + + if(len < ETH_ZLEN) + { + len = ETH_ZLEN; + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } save_flags(flags); cli(); @@ -595,7 +604,6 @@ } } #endif - len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; entry = lp->tx_new & lp->tx_ring_mod_mask; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/am79c961a.c linux.21pre4-ac2/drivers/net/am79c961a.c --- linux.21pre4/drivers/net/am79c961a.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/am79c961a.c 2003-01-06 15:38:16.000000000 +0000 @@ -409,10 +409,19 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = (struct dev_priv *)dev->priv; - unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + unsigned int length = skb->len; unsigned int hdraddr, bufaddr; unsigned int head; unsigned long flags; + + /* FIXME: I thought the 79c961 could do padding - RMK ??? */ + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } head = priv->txhead; hdraddr = priv->txhdr + (head << 3); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/ariadne.c linux.21pre4-ac2/drivers/net/ariadne.c --- linux.21pre4/drivers/net/ariadne.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/ariadne.c 2003-01-06 15:38:15.000000000 +0000 @@ -577,6 +577,7 @@ volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr; int entry; unsigned long flags; + int len = skb->len; #if 0 if (ariadne_debug > 3) { @@ -587,6 +588,15 @@ } #endif + /* FIXME: is the 79C960 new enough to do its own padding right ? */ + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + len = ETH_ZLEN; + } + /* Fill in a Tx ring entry */ #if 0 @@ -617,8 +627,7 @@ priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len); priv->tx_ring[entry]->TMD3 = 0x0000; - memcpyw(priv->tx_buff[entry], (u_short *)skb->data, - skb->len <= ETH_ZLEN ? 
ETH_ZLEN : skb->len); + memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len); #if 0 { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/atarilance.c linux.21pre4-ac2/drivers/net/atarilance.c --- linux.21pre4/drivers/net/atarilance.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/atarilance.c 2003-01-06 15:38:15.000000000 +0000 @@ -786,6 +786,22 @@ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, DREG )); + + /* The old LANCE chips doesn't automatically pad buffers to min. size. */ + len = skb->len; + if(len < ETH_ZLEN) + len = ETH_ZLEN; + /* PAM-Card has a bug: Can only send packets with even number of bytes! */ + else if (lp->cardtype == PAM_CARD && (len & 1)) + ++len; + + if(len > skb->len) + { + skb = skb_padto(skb, len); + if(skb == NULL) + return 0; + } + netif_stop_queue (dev); /* Fill in a Tx ring entry */ @@ -815,11 +831,6 @@ * last. */ - /* The old LANCE chips doesn't automatically pad buffers to min. size. */ - len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; - /* PAM-Card has a bug: Can only send packets with even number of bytes! */ - if (lp->cardtype == PAM_CARD && (len & 1)) - ++len; head->length = -len; head->misc = 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/bagetlance.c linux.21pre4-ac2/drivers/net/bagetlance.c --- linux.21pre4/drivers/net/bagetlance.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/bagetlance.c 2003-01-06 15:38:18.000000000 +0000 @@ -835,6 +835,19 @@ struct lance_tx_head *head; unsigned long flags; + /* The old LANCE chips doesn't automatically pad buffers to min. size. */ + len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; + /* PAM-Card has a bug: Can only send packets with even number of bytes! */ + if (lp->cardtype == PAM_CARD && (len & 1)) + ++len; + + if (len > skb->len) + { + skb = skb_padto(skb, len); + if(skb == NULL) + return 0; + } + /* Transmitter timeout, serious problems. */ if (dev->tbusy) { int tickssofar = jiffies - dev->trans_start; @@ -921,12 +934,6 @@ * last. */ - /* The old LANCE chips doesn't automatically pad buffers to min. size. */ - len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; - /* PAM-Card has a bug: Can only send packets with even number of bytes! 
*/ - if (lp->cardtype == PAM_CARD && (len & 1)) - ++len; - head->length = -len; head->misc = 0; lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/Config.in linux.21pre4-ac2/drivers/net/Config.in --- linux.21pre4/drivers/net/Config.in 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/Config.in 2003-01-07 15:04:48.000000000 +0000 @@ -250,7 +250,7 @@ dep_tristate 'Packet Engines Hamachi GNIC-II support' CONFIG_HAMACHI $CONFIG_PCI dep_tristate 'Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)' CONFIG_YELLOWFIN $CONFIG_PCI $CONFIG_EXPERIMENTAL dep_tristate 'Realtek 8169 Gigabit Ethernet support' CONFIG_R8169 $CONFIG_PCI -dep_tristate 'SysKonnect SK-98xx support' CONFIG_SK98LIN $CONFIG_PCI +dep_tristate 'SysKonnect SK-98xx and SK-95xx Gigabit Ethernet Adapter family support' CONFIG_SK98LIN $CONFIG_PCI dep_tristate 'Broadcom Tigon3 support' CONFIG_TIGON3 $CONFIG_PCI endmenu diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/declance.c linux.21pre4-ac2/drivers/net/declance.c --- linux.21pre4/drivers/net/declance.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/declance.c 2003-01-06 15:38:18.000000000 +0000 @@ -870,7 +870,15 @@ skblen = skb->len; - len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen; + len = skblen; + + if(len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + len = ETH_ZLEN; + } lp->stats.tx_bytes += len; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/depca.c linux.21pre4-ac2/drivers/net/depca.c --- linux.21pre4/drivers/net/depca.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/depca.c 2003-01-06 15:38:15.000000000 +0000 @@ -862,6 +862,13 @@ if (skb->len < 1) goto out; + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + goto out; + } + netif_stop_queue(dev); if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/e2100.c linux.21pre4-ac2/drivers/net/e2100.c --- linux.21pre4/drivers/net/e2100.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/e2100.c 2003-01-28 19:16:46.000000000 +0000 @@ -72,12 +72,12 @@ #define E21_SAPROM 0x10 /* Offset to station address data. */ #define E21_IO_EXTENT 0x20 -static inline void mem_on(short port, volatile char *mem_base, +static inline void mem_on(short port, unsigned long mem_base, unsigned char start_page ) { /* This is a little weird: set the shared memory window by doing a read. The low address bits specify the starting page. */ - readb(mem_base+start_page); + isa_readb(mem_base+start_page); inb(port + E21_MEM_ENABLE); outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON); } @@ -300,15 +300,15 @@ { short ioaddr = dev->base_addr; - char *shared_mem = (char *)dev->mem_start; + unsigned long shared_mem = dev->mem_start; mem_on(ioaddr, shared_mem, ring_page); #ifdef notdef /* Officially this is what we are doing, but the readl() is faster */ - memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr)); + isa_memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr)); #else - ((unsigned int*)hdr)[0] = readl(shared_mem); + ((unsigned int*)hdr)[0] = isa_readl(shared_mem); #endif /* Turn off memory access: we would need to reprogram the window anyway. 
*/ @@ -323,12 +323,11 @@ e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { short ioaddr = dev->base_addr; - char *shared_mem = (char *)dev->mem_start; - mem_on(ioaddr, shared_mem, (ring_offset>>8)); + mem_on(ioaddr, dev->mem_start, (ring_offset>>8)); /* Packet is always in one chunk -- we can copy + cksum. */ - eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0); + isa_eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0); mem_off(ioaddr); } @@ -338,14 +337,14 @@ int start_page) { short ioaddr = dev->base_addr; - volatile char *shared_mem = (char *)dev->mem_start; + unsigned long shared_mem = dev->mem_start; /* Set the shared memory window start by doing a read, with the low address bits specifying the starting page. */ - readb(shared_mem + start_page); + isa_readb(shared_mem + start_page); mem_on(ioaddr, shared_mem, start_page); - memcpy_toio(shared_mem, buf, count); + isa_memcpy_toio(shared_mem, buf, count); mem_off(ioaddr); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/eepro.c linux.21pre4-ac2/drivers/net/eepro.c --- linux.21pre4/drivers/net/eepro.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/eepro.c 2003-01-06 15:38:15.000000000 +0000 @@ -1128,17 +1128,24 @@ struct eepro_local *lp = (struct eepro_local *)dev->priv; unsigned long flags; int ioaddr = dev->base_addr; + short length = skb->len; if (net_debug > 5) printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } netif_stop_queue (dev); eepro_dis_int(ioaddr); spin_lock_irqsave(&lp->lock, flags); { - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; unsigned char *buf = skb->data; if (hardware_send_packet(dev, buf, length)) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/eexpress.c linux.21pre4-ac2/drivers/net/eexpress.c --- linux.21pre4/drivers/net/eexpress.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/eexpress.c 2003-01-06 15:38:15.000000000 +0000 @@ -640,6 +640,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) { struct net_local *lp = (struct net_local *)dev->priv; + short length = buf->len; #ifdef CONFIG_SMP unsigned long flags; #endif @@ -648,6 +649,14 @@ printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name); #endif + if(buf->len < ETH_ZLEN) + { + buf = skb_padto(buf, ETH_ZLEN); + if(buf == NULL) + return 0; + length = buf->len; + } + disable_irq(dev->irq); /* @@ -660,8 +669,6 @@ #endif { - unsigned short length = (ETH_ZLEN < buf->len) ? buf->len : - ETH_ZLEN; unsigned short *data = (unsigned short *)buf->data; lp->stats.tx_bytes += length; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/epic100.c linux.21pre4-ac2/drivers/net/epic100.c --- linux.21pre4/drivers/net/epic100.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/epic100.c 2003-01-06 15:38:16.000000000 +0000 @@ -975,6 +975,13 @@ int entry, free_count; u32 ctrl_word; long flags; + + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } /* Caution: the write order is important here, set the field with the "ownership" bit last. 
*/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/eth16i.c linux.21pre4-ac2/drivers/net/eth16i.c --- linux.21pre4/drivers/net/eth16i.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/eth16i.c 2003-01-06 15:38:15.000000000 +0000 @@ -1056,10 +1056,17 @@ struct eth16i_local *lp = (struct eth16i_local *)dev->priv; int ioaddr = dev->base_addr; int status = 0; - ushort length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + ushort length = skb->len; unsigned char *buf = skb->data; unsigned long flags; + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } netif_stop_queue(dev); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/fmv18x.c linux.21pre4-ac2/drivers/net/fmv18x.c --- linux.21pre4/drivers/net/fmv18x.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/fmv18x.c 2003-01-06 15:38:15.000000000 +0000 @@ -369,7 +369,7 @@ { struct net_local *lp = dev->priv; int ioaddr = dev->base_addr; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + short length = skb->len; unsigned char *buf = skb->data; unsigned long flags; @@ -381,6 +381,15 @@ dev->name, length); return 1; } + + if (length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } + if (net_debug > 4) printk("%s: Transmitting a packet of length %lu.\n", dev->name, (unsigned long)skb->len); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/hp100.c linux.21pre4-ac2/drivers/net/hp100.c --- linux.21pre4/drivers/net/hp100.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/hp100.c 2003-01-06 15:38:15.000000000 +0000 @@ -1542,6 +1542,13 @@ if (skb->len <= 0) return 0; + + if (skb->len < ETH_ZLEN && lp->chip == HP100_CHIPID_SHASTA) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } /* Get Tx ring tail pointer */ if (lp->txrtail->next == lp->txrhead) { @@ -2099,6 +2106,7 @@ struct hp100_private *lp = (struct hp100_private *) dev->priv; #ifdef HP100_DEBUG_B + int ioaddr = dev->base_addr; hp100_outw(0x4216, TRACE); printk("hp100: %s: misc_interrupt\n", dev->name); #endif @@ -2538,6 +2546,11 @@ return HP100_LAN_10; if (val_10 & HP100_AUI_ST) { /* have we BNC or AUI onboard? */ + /* + * This can be overriden by dos utility, so if this has no effect, + * perhaps you need to download that utility from HP and set card + * back to "auto detect". + */ val_10 |= HP100_AUI_SEL | HP100_LOW_TH; hp100_page(MAC_CTRL); hp100_outb(val_10, 10_LAN_CFG_1); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/lance.c linux.21pre4-ac2/drivers/net/lance.c --- linux.21pre4/drivers/net/lance.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/lance.c 2003-01-06 15:38:15.000000000 +0000 @@ -893,8 +893,15 @@ /* The old LANCE chips doesn't automatically pad buffers to min. size. */ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) { - lp->tx_ring[entry].length = - -(ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN); + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + goto out; + lp->tx_ring[entry].length = -ETH_ZLEN; + } + else + lp->tx_ring[entry].length = -skb->len; } else lp->tx_ring[entry].length = -skb->len; @@ -927,6 +934,7 @@ if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) netif_stop_queue(dev); +out: spin_unlock_irqrestore(&lp->devlock, flags); return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/lasi_82596.c linux.21pre4-ac2/drivers/net/lasi_82596.c --- linux.21pre4/drivers/net/lasi_82596.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/lasi_82596.c 2003-01-06 15:38:19.000000000 +0000 @@ -1084,12 +1084,20 @@ struct i596_private *lp = (struct i596_private *) dev->priv; struct tx_cmd *tx_cmd; struct i596_tbd *tbd; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + short length = skb->len; dev->trans_start = jiffies; DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name, skb->len, skb->data)); + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } + netif_stop_queue(dev); tx_cmd = lp->tx_cmds + lp->next_tx_cmd; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/lp486e.c linux.21pre4-ac2/drivers/net/lp486e.c --- linux.21pre4/drivers/net/lp486e.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/lp486e.c 2003-01-06 15:38:18.000000000 +0000 @@ -886,7 +886,16 @@ if (skb->len <= 0) return 0; - length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; + length = skb->len; + + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } + dev->trans_start = jiffies; tx_cmd = (struct tx_cmd *) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/pcmcia/fmvj18x_cs.c linux.21pre4-ac2/drivers/net/pcmcia/fmvj18x_cs.c --- linux.21pre4/drivers/net/pcmcia/fmvj18x_cs.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/pcmcia/fmvj18x_cs.c 2003-01-06 15:38:19.000000000 +0000 @@ -952,11 +952,19 @@ { struct local_info_t *lp = (struct local_info_t *)dev->priv; ioaddr_t ioaddr = dev->base_addr; + short length = skb->len; + + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } netif_stop_queue(dev); { - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; unsigned char *buf = skb->data; if (length > ETH_FRAME_LEN) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/pcmcia/ray_cs.c linux.21pre4-ac2/drivers/net/pcmcia/ray_cs.c --- linux.21pre4/drivers/net/pcmcia/ray_cs.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/pcmcia/ray_cs.c 2003-01-06 15:38:19.000000000 +0000 @@ -1054,7 +1054,7 @@ { ray_dev_t *local = dev->priv; dev_link_t *link = local->finder; - short length; + short length = skb->len; if (!(link->state & DEV_PRESENT)) { DEBUG(2,"ray_dev_start_xmit - device not present\n"); @@ -1070,7 +1070,13 @@ } } - length = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } switch (ray_hw_xmit( skb->data, length, dev, DATA_TYPE)) { case XMIT_NO_CCS: case XMIT_NEED_AUTH: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/pcmcia/xirc2ps_cs.c linux.21pre4-ac2/drivers/net/pcmcia/xirc2ps_cs.c --- linux.21pre4/drivers/net/pcmcia/xirc2ps_cs.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/pcmcia/xirc2ps_cs.c 2003-01-06 15:38:19.000000000 +0000 @@ -1548,7 +1548,6 @@ DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n", skb, dev, pktlen); - netif_stop_queue(dev); /* adjust the packet length to min. required * and hope that the buffer is large enough @@ -1558,8 +1557,14 @@ * pad this in his buffer with random bytes */ if (pktlen < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; pktlen = ETH_ZLEN; + } + netif_stop_queue(dev); SelectPage(0); PutWord(XIRCREG0_TRS, (u_short)pktlen+2); freespace = GetWord(XIRCREG0_TSO); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/pcmcia/xircom_tulip_cb.c linux.21pre4-ac2/drivers/net/pcmcia/xircom_tulip_cb.c --- linux.21pre4/drivers/net/pcmcia/xircom_tulip_cb.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/pcmcia/xircom_tulip_cb.c 2003-01-06 15:38:19.000000000 +0000 @@ -923,6 +923,14 @@ /* Calculate the next Tx descriptor entry. */ entry = tp->cur_tx % TX_RING_SIZE; + /* Seems to be needed even though the docs disagree */ + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } + tp->tx_skbuff[entry] = skb; #ifdef CARDBUS if (tp->chip_id == X3201_3) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/pcnet32.c linux.21pre4-ac2/drivers/net/pcnet32.c --- linux.21pre4/drivers/net/pcnet32.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/pcnet32.c 2003-01-06 18:23:43.000000000 +0000 @@ -1,5 +1,5 @@ -/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ -/* +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. + * * Copyright 1996-1999 Thomas Bogendoerfer * * Derived from the lance driver written 1993,1994,1995 by Donald Becker. @@ -549,7 +549,7 @@ /* initialize variables */ fdx = mii = fset = dxsuflo = ltint = 0; chip_version = (chip_version >> 12) & 0xffff; - + switch (chip_version) { case 0x2420: chipname = "PCnet/PCI 79C970"; /* PCI */ @@ -1194,19 +1194,12 @@ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; if (err_status & 0x10000000) lp->stats.tx_window_errors++; -#ifndef DO_DXSUFLO if (err_status & 0x40000000) { lp->stats.tx_fifo_errors++; - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", - dev->name, csr0); - must_restart = 1; - } -#else - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - if (! lp->dxsuflo) { /* If controller doesn't recover ... */ +#ifdef DO_DXSUFLO + if (! lp->dxsuflo) +#endif + { /* If controller doesn't recover ... */ /* Ackk! On FIFO errors the Tx unit is turned off! */ /* Remove this verbosity later! */ printk(KERN_ERR "%s: Tx FIFO error! 
CSR0=%4.4x\n", @@ -1214,7 +1207,6 @@ must_restart = 1; } } -#endif } else { if (status & 0x1800) lp->stats.collisions++; @@ -1754,12 +1746,13 @@ } } + module_init(pcnet32_init_module); module_exit(pcnet32_cleanup_module); /* * Local variables: - * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c pcnet32.c" + * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include/linux -Wall -Wstrict-prototypes -O2 -m486 -c pcnet32.c" * c-indent-level: 4 * tab-width: 8 * End: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/r8169.c linux.21pre4-ac2/drivers/net/r8169.c --- linux.21pre4/drivers/net/r8169.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/r8169.c 2003-01-06 15:38:20.000000000 +0000 @@ -819,6 +819,13 @@ void *ioaddr = tp->mmio_addr; int entry = tp->cur_tx % NUM_TX_DESC; + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } + spin_lock_irq(&tp->lock); if ((tp->TxDescArray[entry].status & OWNbit) == 0) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/seeq8005.c linux.21pre4-ac2/drivers/net/seeq8005.c --- linux.21pre4/drivers/net/seeq8005.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/seeq8005.c 2003-01-06 15:38:15.000000000 +0000 @@ -378,9 +378,16 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *lp = (struct net_local *)dev->priv; - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + short length = skb->len; unsigned char *buf = skb->data; + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } /* Block a timer-based transmit from overlapping */ netif_stop_queue(dev); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/build_no.c linux.21pre4-ac2/drivers/net/sk98lin/build_no.c --- linux.21pre4/drivers/net/sk98lin/build_no.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/net/sk98lin/build_no.c 2003-01-06 15:38:18.000000000 +0000 @@ -0,0 +1,10 @@ +/****************************************************************************** + * + * Name: Build_No.c + * Version: 1.01 + * + */ + +static const char SysKonnectBuildNumber[] = + "@(#)SK-BUILD: 6.02 (20021219) PL: ALL.01"; + \ No newline at end of file diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/lm80.h linux.21pre4-ac2/drivers/net/sk98lin/h/lm80.h --- linux.21pre4/drivers/net/sk98lin/h/lm80.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/lm80.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: lm80.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.3 $ - * Date: $Date: 1999/11/22 13:41:19 $ + * Version: $Revision: 1.4 $ + * Date: $Date: 2002/04/25 11:04:10 $ * Purpose: Contains all defines for the LM80 Chip * (National Semiconductor). * @@ -11,8 +11,7 @@ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,9 @@ * * History: * $Log: lm80.h,v $ + * Revision 1.4 2002/04/25 11:04:10 rschmidt + * Editorial changes + * * Revision 1.3 1999/11/22 13:41:19 cgoos * Changed license header to GPL. * @@ -53,23 +55,23 @@ * * All registers are 8 bit wide */ -#define LM80_CFG 0x00 /* Configuration Register */ -#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */ -#define LM80_ISRC_2 0x02 /* Interrupt Status Register 2 */ -#define LM80_IMSK_1 0x03 /* Interrupt Mask Register 1 */ -#define LM80_IMSK_2 0x04 /* Interrupt Mask Register 2 */ +#define LM80_CFG 0x00 /* Configuration Register */ +#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */ +#define LM80_ISRC_2 0x02 /* Interrupt Status Register 2 */ +#define LM80_IMSK_1 0x03 /* Interrupt Mask Register 1 */ +#define LM80_IMSK_2 0x04 /* Interrupt Mask Register 2 */ #define LM80_FAN_CTRL 0x05 /* Fan Devisor/RST#/OS# Register */ #define LM80_TEMP_CTRL 0x06 /* OS# Config, Temp Res. Reg */ /* 0x07 - 0x1f reserved */ /* current values */ -#define LM80_VT0_IN 0x20 /* current Voltage 0 value */ -#define LM80_VT1_IN 0x21 /* current Voltage 1 value */ -#define LM80_VT2_IN 0x22 /* current Voltage 2 value */ -#define LM80_VT3_IN 0x23 /* current Voltage 3 value */ -#define LM80_VT4_IN 0x24 /* current Voltage 4 value */ -#define LM80_VT5_IN 0x25 /* current Voltage 5 value */ -#define LM80_VT6_IN 0x26 /* current Voltage 6 value */ -#define LM80_TEMP_IN 0x27 /* current temperature value */ +#define LM80_VT0_IN 0x20 /* current Voltage 0 value */ +#define LM80_VT1_IN 0x21 /* current Voltage 1 value */ +#define LM80_VT2_IN 0x22 /* current Voltage 2 value */ +#define LM80_VT3_IN 0x23 /* current Voltage 3 value */ +#define LM80_VT4_IN 0x24 /* current Voltage 4 value */ +#define LM80_VT5_IN 0x25 /* current Voltage 5 value */ +#define LM80_VT6_IN 0x26 /* current Voltage 6 value */ +#define LM80_TEMP_IN 0x27 /* current Temperature value */ #define LM80_FAN1_IN 0x28 /* current Fan 1 count */ #define LM80_FAN2_IN 0x29 /* current Fan 2 count */ /* limit values */ @@ -111,23 +113,23 @@ /* LM80_ISRC_1 Interrupt Status Register 1 */ /* LM80_IMSK_1 Interrupt Mask Register 1 */ -#define LM80_IS_VT0 (1<<0) /* limit exceeded for Voltage 0 */ -#define LM80_IS_VT1 (1<<1) /* limit exceeded for Voltage 1 */ -#define LM80_IS_VT2 (1<<2) /* limit exceeded for Voltage 2 */ -#define LM80_IS_VT3 (1<<3) /* limit exceeded for Voltage 3 */ -#define LM80_IS_VT4 (1<<4) /* limit exceeded for Voltage 4 */ -#define LM80_IS_VT5 (1<<5) /* limit exceeded for Voltage 5 */ -#define LM80_IS_VT6 (1<<6) /* limit exceeded for Voltage 6 */ +#define LM80_IS_VT0 (1<<0) /* limit exceeded for Voltage 0 */ +#define LM80_IS_VT1 (1<<1) /* limit exceeded for Voltage 1 */ +#define LM80_IS_VT2 (1<<2) /* limit exceeded for Voltage 2 */ +#define LM80_IS_VT3 (1<<3) /* limit exceeded for Voltage 3 */ +#define LM80_IS_VT4 (1<<4) /* limit exceeded for Voltage 4 */ +#define LM80_IS_VT5 (1<<5) /* limit exceeded for Voltage 5 */ +#define LM80_IS_VT6 (1<<6) /* limit exceeded for Voltage 6 */ #define LM80_IS_INT_IN (1<<7) /* state of INT_IN# */ /* LM80_ISRC_2 Interrupt Status Register 2 */ /* LM80_IMSK_2 Interrupt Mask Register 2 */ #define LM80_IS_TEMP (1<<0) /* HOT temperature limit exceeded */ -#define LM80_IS_BTI (1<<1) /* state of BTI# pin */ +#define LM80_IS_BTI (1<<1) /* state of BTI# pin */ #define LM80_IS_FAN1 (1<<2) /* count limit exceeded for Fan 1 */ #define 
LM80_IS_FAN2 (1<<3) /* count limit exceeded for Fan 2 */ -#define LM80_IS_CI (1<<4) /* Chassis Intrusion occured */ -#define LM80_IS_OS (1<<5) /* OS temperature limit exceeded */ +#define LM80_IS_CI (1<<4) /* Chassis Intrusion occured */ +#define LM80_IS_OS (1<<5) /* OS temperature limit exceeded */ /* bit 6 and 7 are reserved in LM80_ISRC_2 */ #define LM80_IS_HT_IRQ_MD (1<<6) /* Hot temperature interrupt mode */ #define LM80_IS_OT_IRQ_MD (1<<7) /* OS temperature interrupt mode */ @@ -181,7 +183,7 @@ /* LM80_FAN2_COUNT_LIM Fan 2 count limit (low) */ /* 0x3e - 0x3f reserved */ -#define LM80_ADDR 0x28 /* LM80 default addr */ +#define LM80_ADDR 0x28 /* LM80 default addr */ /* typedefs *******************************************************************/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skaddr.h linux.21pre4-ac2/drivers/net/sk98lin/h/skaddr.h --- linux.21pre4/drivers/net/sk98lin/h/skaddr.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skaddr.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skaddr.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.24 $ - * Date: $Date: 2001/01/22 13:41:34 $ + * Version: $Revision: 1.26 $ + * Date: $Date: 2002/11/15 07:24:42 $ * Purpose: Header file for Address Management (MC, UC, Prom). * ******************************************************************************/ @@ -26,6 +26,14 @@ * History: * * $Log: skaddr.h,v $ + * Revision 1.26 2002/11/15 07:24:42 tschilli + * SK_ADDR_EQUAL macro fixed. + * + * Revision 1.25 2002/06/10 13:55:18 tschilli + * Changes for handling YUKON. + * All changes are internally and not visible to the programmer + * using this module. + * * Revision 1.24 2001/01/22 13:41:34 rassmann * Supporting two nets on dual-port adapters. * @@ -144,7 +152,7 @@ /* ----- Common return values ----- */ #define SK_ADDR_SUCCESS 0 /* Function returned successfully. */ -#define SK_ADDR_ILLEGAL_PORT 100 /* Port number too high. */ +#define SK_ADDR_ILLEGAL_PORT 100 /* Port number too high. */ #define SK_ADDR_TOO_EARLY 101 /* Function called too early. */ /* ----- Clear/Add flag bits ----- */ @@ -198,6 +206,7 @@ /* Macros */ +#if 0 #ifndef SK_ADDR_EQUAL /* * "&" instead of "&&" allows better optimization on IA-64. @@ -217,6 +226,23 @@ (*(SK_U32 *)&(((SK_U8 *)(A1))[0]) == *(SK_U32 *)&(((SK_U8 *)(A2))[0]))) #endif /* SK_ADDR_DWORD_COMPARE */ #endif /* SK_ADDR_EQUAL */ +#endif /* 0 */ + +#ifndef SK_ADDR_EQUAL +#ifndef SK_ADDR_DWORD_COMPARE +#define SK_ADDR_EQUAL(A1,A2) ( \ + (((SK_U8 *)(A1))[5] == ((SK_U8 *)(A2))[5]) & \ + (((SK_U8 *)(A1))[4] == ((SK_U8 *)(A2))[4]) & \ + (((SK_U8 *)(A1))[3] == ((SK_U8 *)(A2))[3]) & \ + (((SK_U8 *)(A1))[2] == ((SK_U8 *)(A2))[2]) & \ + (((SK_U8 *)(A1))[1] == ((SK_U8 *)(A2))[1]) & \ + (((SK_U8 *)(A1))[0] == ((SK_U8 *)(A2))[0])) +#else /* SK_ADDR_DWORD_COMPARE */ +#define SK_ADDR_EQUAL(A1,A2) ( \ + (*(SK_U16 *)&(((SK_U8 *)(A1))[4]) == *(SK_U16 *)&(((SK_U8 *)(A2))[4])) && \ + (*(SK_U32 *)&(((SK_U8 *)(A1))[0]) == *(SK_U32 *)&(((SK_U8 *)(A2))[0]))) +#endif /* SK_ADDR_DWORD_COMPARE */ +#endif /* SK_ADDR_EQUAL */ /* typedefs *******************************************************************/ @@ -239,13 +265,13 @@ /* ----- Public part (read-only) ----- */ - SK_MAC_ADDR CurrentMacAddress; /* Current physical MAC Address. */ + SK_MAC_ADDR CurrentMacAddress; /* Current physical MAC Address. */ SK_MAC_ADDR PermanentMacAddress; /* Permanent physical MAC Address. */ - int PromMode; /* Promiscuous Mode. 
*/ + int PromMode; /* Promiscuous Mode. */ /* ----- Private part ----- */ - SK_MAC_ADDR PreviousMacAddress; /* Prev. phys. MAC Address. */ + SK_MAC_ADDR PreviousMacAddress; /* Prev. phys. MAC Address. */ SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */ SK_U8 Align01; @@ -255,18 +281,20 @@ SK_U32 NextExactMatchDrv; SK_MAC_ADDR Exact[SK_ADDR_EXACT_MATCHES]; SK_FILTER64 InexactFilter; /* For 64-bit hash register. */ + SK_FILTER64 InexactRlmtFilter; /* For 64-bit hash register. */ + SK_FILTER64 InexactDrvFilter; /* For 64-bit hash register. */ } SK_ADDR_PORT; struct s_AddrNet { /* ----- Public part (read-only) ----- */ - SK_MAC_ADDR CurrentMacAddress; /* Logical MAC Address. */ + SK_MAC_ADDR CurrentMacAddress; /* Logical MAC Address. */ SK_MAC_ADDR PermanentMacAddress; /* Logical MAC Address. */ /* ----- Private part ----- */ - SK_U32 ActivePort; /* View of module ADDR. */ + SK_U32 ActivePort; /* View of module ADDR. */ SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */ SK_U8 Align01; SK_U16 Align02; @@ -294,38 +322,86 @@ extern int SkAddrInit( SK_AC *pAC, SK_IOC IoC, - int Level); + int Level); extern int SkAddrMcClear( SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, - int Flags); + int Flags); + +extern int SkAddrXmacMcClear( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + int Flags); + +extern int SkAddrGmacMcClear( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + int Flags); extern int SkAddrMcAdd( SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, SK_MAC_ADDR *pMc, - int Flags); + int Flags); + +extern int SkAddrXmacMcAdd( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + SK_MAC_ADDR *pMc, + int Flags); + +extern int SkAddrGmacMcAdd( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + SK_MAC_ADDR *pMc, + int Flags); extern int SkAddrMcUpdate( SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber); +extern int SkAddrXmacMcUpdate( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber); + +extern int SkAddrGmacMcUpdate( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber); + extern int SkAddrOverride( SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, SK_MAC_ADDR *pNewAddr, - int Flags); + int Flags); extern int SkAddrPromiscuousChange( SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber, - int NewPromMode); + int NewPromMode); + +extern int SkAddrXmacPromiscuousChange( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + int NewPromMode); + +extern int SkAddrGmacPromiscuousChange( + SK_AC *pAC, + SK_IOC IoC, + SK_U32 PortNumber, + int NewPromMode); extern int SkAddrSwap( SK_AC *pAC, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skdebug.h linux.21pre4-ac2/drivers/net/sk98lin/h/skdebug.h --- linux.21pre4/drivers/net/sk98lin/h/skdebug.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skdebug.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: skdebug.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.10 $ - * Date: $Date: 1999/11/22 13:47:40 $ + * Version: $Revision: 1.12 $ + * Date: $Date: 2002/07/15 15:37:13 $ * Purpose: SK specific DEBUG support * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +25,13 @@ * * History: * $Log: skdebug.h,v $ + * Revision 1.12 2002/07/15 15:37:13 rschmidt + * Power Management support + * Editorial changes + * + * Revision 1.11 2002/04/25 11:04:39 rschmidt + * Editorial changes + * * Revision 1.10 1999/11/22 13:47:40 cgoos * Changed license header to GPL. * @@ -89,26 +95,25 @@ #define SK_DBGMOD_MERR 0x00000001L /* general module error indication */ #define SK_DBGMOD_HWM 0x00000002L /* Hardware init module */ -#define SK_DBGMOD_RLMT 0x00000004L /* RLMT module */ -#define SK_DBGMOD_VPD 0x00000008L /* VPD module */ -#define SK_DBGMOD_I2C 0x00000010L /* I2C module */ -#define SK_DBGMOD_PNMI 0x00000020L /* PNMI module */ -#define SK_DBGMOD_CSUM 0x00000040L /* CSUM module */ -#define SK_DBGMOD_ADDR 0x00000080L /* ADDR module */ +#define SK_DBGMOD_RLMT 0x00000004L /* RLMT module */ +#define SK_DBGMOD_VPD 0x00000008L /* VPD module */ +#define SK_DBGMOD_I2C 0x00000010L /* I2C module */ +#define SK_DBGMOD_PNMI 0x00000020L /* PNMI module */ +#define SK_DBGMOD_CSUM 0x00000040L /* CSUM module */ +#define SK_DBGMOD_ADDR 0x00000080L /* ADDR module */ #define SK_DBGMOD_PECP 0x00000100L /* PECP module */ +#define SK_DBGMOD_POWM 0x00000200L /* Power Management module */ /* Debug events */ -#define SK_DBGCAT_INIT 0x00000001L /* module/driver initialization */ -#define SK_DBGCAT_CTRL 0x00000002L /* controlling: add/rmv MCA/MAC - * and other controls (IOCTL) - */ -#define SK_DBGCAT_ERR 0x00000004L /* error handling paths */ -#define SK_DBGCAT_TX 0x00000008L /* transmit path */ -#define SK_DBGCAT_RX 0x00000010L /* receive path */ -#define SK_DBGCAT_IRQ 0x00000020L /* general IRQ handling */ -#define SK_DBGCAT_QUEUE 0x00000040L /* any queue management */ -#define SK_DBGCAT_DUMP 0x00000080L /* large data output e.g. hex dump */ -#define SK_DBGCAT_FATAL 0x00000100L /* large data output e.g. hex dump */ +#define SK_DBGCAT_INIT 0x00000001L /* module/driver initialization */ +#define SK_DBGCAT_CTRL 0x00000002L /* controlling devices */ +#define SK_DBGCAT_ERR 0x00000004L /* error handling paths */ +#define SK_DBGCAT_TX 0x00000008L /* transmit path */ +#define SK_DBGCAT_RX 0x00000010L /* receive path */ +#define SK_DBGCAT_IRQ 0x00000020L /* general IRQ handling */ +#define SK_DBGCAT_QUEUE 0x00000040L /* any queue management */ +#define SK_DBGCAT_DUMP 0x00000080L /* large data output e.g. 
hex dump */ +#define SK_DBGCAT_FATAL 0x00000100L /* fatal error */ #endif /* __INC_SKDEBUG_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skdrv1st.h linux.21pre4-ac2/drivers/net/sk98lin/h/skdrv1st.h --- linux.21pre4/drivers/net/sk98lin/h/skdrv1st.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skdrv1st.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skdrv1st.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.9.2.1 $ - * Date: $Date: 2001/03/12 16:50:59 $ + * Version: $Revision: 1.9.2.2 $ + * Date: $Date: 2001/12/07 12:06:42 $ * Purpose: First header file for driver and all other modules * ******************************************************************************/ @@ -27,6 +27,9 @@ * History: * * $Log: skdrv1st.h,v $ + * Revision 1.9.2.2 2001/12/07 12:06:42 mlindner + * Fix: malloc -> slab changes + * * Revision 1.9.2.1 2001/03/12 16:50:59 mlindner * chg: kernel 2.4 adaption * @@ -84,6 +87,10 @@ #ifndef __INC_SKDRV1ST_H #define __INC_SKDRV1ST_H +/* Check kernel version */ +#include +#if (LINUX_VERSION_CODE > 0x020300) +#endif typedef struct s_AC SK_AC; @@ -116,7 +123,6 @@ #include #include #include -#include #include #include #include @@ -140,6 +146,8 @@ #define SK_BIG_ENDIAN #endif +#define SK_NET_DEVICE net_device + /* we use gethrtime(), return unit: nanoseconds */ #define SK_TICKS_PER_SEC HZ @@ -161,17 +169,17 @@ #define SK_MEMCPY(dest,src,size) memcpy(dest,src,size) #define SK_MEMCMP(s1,s2,size) memcmp(s1,s2,size) #define SK_MEMSET(dest,val,size) memset(dest,val,size) -#define SK_STRLEN(pStr) strlen((char*)pStr) -#define SK_STRNCPY(pDest,pSrc,size) strncpy((char*)pDest,(char*)pSrc,size) -#define SK_STRCMP(pStr1,pStr2) strcmp((char*)pStr1,(char*)pStr2) +#define SK_STRLEN(pStr) strlen((char*)(pStr)) +#define SK_STRNCPY(pDest,pSrc,size) strncpy((char*)(pDest),(char*)(pSrc),size) +#define SK_STRCMP(pStr1,pStr2) strcmp((char*)(pStr1),(char*)(pStr2)) /* macros to access the adapter */ -#define SK_OUT8(b,a,v) writeb(v, (b+a)) -#define SK_OUT16(b,a,v) writew(v, (b+a)) -#define SK_OUT32(b,a,v) writel(v, (b+a)) -#define SK_IN8(b,a,pv) (*(pv) = readb(b+a)) -#define SK_IN16(b,a,pv) (*(pv) = readw(b+a)) -#define SK_IN32(b,a,pv) (*(pv) = readl(b+a)) +#define SK_OUT8(b,a,v) writeb((v), ((b)+(a))) +#define SK_OUT16(b,a,v) writew((v), ((b)+(a))) +#define SK_OUT32(b,a,v) writel((v), ((b)+(a))) +#define SK_IN8(b,a,pv) (*(pv) = readb((b)+(a))) +#define SK_IN16(b,a,pv) (*(pv) = readw((b)+(a))) +#define SK_IN32(b,a,pv) (*(pv) = readl((b)+(a))) #define int8_t char #define int16_t short @@ -222,11 +230,11 @@ #define SK_DBGCAT_DRV_INT_SRC 0x04000000 #define SK_DBGCAT_DRV_EVENT 0x08000000 -#endif /* DEBUG */ +#endif #define SK_ERR_LOG SkErrorLog extern void SkErrorLog(SK_AC*, int, int, char*); -#endif /* __INC_SKDRV1ST_H */ +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skdrv2nd.h linux.21pre4-ac2/drivers/net/sk98lin/h/skdrv2nd.h --- linux.21pre4/drivers/net/sk98lin/h/skdrv2nd.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skdrv2nd.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skdrv2nd.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.12.2.1 $ - * Date: $Date: 2001/03/12 16:50:59 $ + * Version: $Revision: 1.12.2.2 $ + * Date: $Date: 2001/09/05 12:14:50 $ * Purpose: Second header file for driver and all other modules * 
******************************************************************************/ @@ -27,6 +27,9 @@ * History: * * $Log: skdrv2nd.h,v $ + * Revision 1.12.2.2 2001/09/05 12:14:50 mlindner + * add: New hardware revision int + * * Revision 1.12.2.1 2001/03/12 16:50:59 mlindner * chg: kernel 2.4 adaption * @@ -202,7 +205,7 @@ SK_U32 VDataLow; /* Receive buffer Addr, low dword */ SK_U32 VDataHigh; /* Receive buffer Addr, high dword */ SK_U32 FrameStat; /* Receive Frame Status word */ - SK_U32 TimeStamp; /* Time stamp from XMAX */ + SK_U32 TimeStamp; /* Time stamp from XMAC */ SK_U32 TcpSums; /* TCP Sum 2 / TCP Sum 1 */ SK_U32 TcpSumStarts; /* TCP Sum Start 2 / TCP Sum Start 1 */ RXD *pNextRxd; /* Pointer to next Rxd */ @@ -218,7 +221,8 @@ SK_U32 VDataHigh; /* Transmit Buffer Addr, high dword */ SK_U32 FrameStat; /* Transmit Frame Status Word */ SK_U32 TcpSumOfs; /* Reserved / TCP Sum Offset */ - SK_U32 TcpSumStWr; /* TCP Sum Start / TCP Sum Write */ + SK_U16 TcpSumSt; /* TCP Sum Start */ + SK_U16 TcpSumWr; /* TCP Sum Write */ SK_U32 TcpReserved; /* not used */ TXD *pNextTxd; /* Pointer to next Txd */ struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */ @@ -445,11 +449,11 @@ int BoardLevel; /* level of active hw init (0-2) */ char DeviceStr[80]; /* adapter string from vpd */ SK_U32 AllocFlag; /* flag allocation of resources */ - struct pci_dev PciDev; /* for access to pci config space */ + struct pci_dev *PciDev; /* for access to pci config space */ SK_U32 PciDevId; /* pci device id */ - struct net_device *dev[2]; /* pointer to device struct */ + struct SK_NET_DEVICE *dev[2]; /* pointer to device struct */ char Name[30]; /* driver name */ - struct net_device *Next; /* link all devices (for clearing) */ + struct SK_NET_DEVICE *Next; /* link all devices (for clearing) */ int RxBufSize; /* length of receive buffers */ struct net_device_stats stats; /* linux 'netstat -i' statistics */ int Index; /* internal board index number */ @@ -465,12 +469,12 @@ /* addresses for this board */ /* (may be more than HW can)*/ + int HWRevision; /* Hardware revision */ int ActivePort; /* the active XMAC port */ int MaxPorts; /* number of activated ports */ int TxDescrPerRing; /* # of descriptors per tx ring */ int RxDescrPerRing; /* # of descriptors per rx ring */ - caddr_t pDescrMem; /* Pointer to the descriptor area */ dma_addr_t pDescrMemDMA; /* PCI DMA address of area */ @@ -483,6 +487,11 @@ SK_U32 CsOfs; /* for checksum calculation */ SK_BOOL CheckQueue; /* check event queue soon */ + + /* Only for tests */ + int PortUp; + int PortDown; + }; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skerror.h linux.21pre4-ac2/drivers/net/sk98lin/h/skerror.h --- linux.21pre4/drivers/net/sk98lin/h/skerror.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skerror.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: skerror.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.4 $ - * Date: $Date: 1999/11/22 13:51:59 $ + * Version: $Revision: 1.5 $ + * Date: $Date: 2002/04/25 11:05:10 $ * Purpose: SK specific Error log support * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +25,9 @@ * * History: * $Log: skerror.h,v $ + * Revision 1.5 2002/04/25 11:05:10 rschmidt + * Editorial changes + * * Revision 1.4 1999/11/22 13:51:59 cgoos * Changed license header to GPL. * @@ -49,29 +51,29 @@ #define _INC_SKERROR_H_ /* - * Define the Error Classes + * Define Error Classes */ -#define SK_ERRCL_OTHER (0) /* Other error */ +#define SK_ERRCL_OTHER (0) /* Other error */ #define SK_ERRCL_CONFIG (1L<<0) /* Configuration error */ #define SK_ERRCL_INIT (1L<<1) /* Initialization error */ -#define SK_ERRCL_NORES (1L<<2) /* Out of resources error */ -#define SK_ERRCL_SW (1L<<3) /* internal Software error */ -#define SK_ERRCL_HW (1L<<4) /* Hardware failure */ +#define SK_ERRCL_NORES (1L<<2) /* Out of Resources error */ +#define SK_ERRCL_SW (1L<<3) /* Internal Software error */ +#define SK_ERRCL_HW (1L<<4) /* Hardware Failure */ #define SK_ERRCL_COMM (1L<<5) /* Communication error */ /* - * Define Error code bases + * Define Error Code Bases */ -#define SK_ERRBASE_RLMT 100 /* Base Error number for RLMT */ -#define SK_ERRBASE_HWINIT 200 /* Base Error number for HWInit */ -#define SK_ERRBASE_VPD 300 /* Base Error number for VPD */ -#define SK_ERRBASE_PNMI 400 /* Base Error number for PNMI */ -#define SK_ERRBASE_CSUM 500 /* Base Error number for Checksum */ -#define SK_ERRBASE_SIRQ 600 /* Base Error number for Special IRQ */ -#define SK_ERRBASE_I2C 700 /* Base Error number for i2C module */ -#define SK_ERRBASE_QUEUE 800 /* Base Error number for Scheduler */ -#define SK_ERRBASE_ADDR 900 /* Base Error number for Address mod. */ +#define SK_ERRBASE_RLMT 100 /* Base Error number for RLMT */ +#define SK_ERRBASE_HWINIT 200 /* Base Error number for HWInit */ +#define SK_ERRBASE_VPD 300 /* Base Error number for VPD */ +#define SK_ERRBASE_PNMI 400 /* Base Error number for PNMI */ +#define SK_ERRBASE_CSUM 500 /* Base Error number for Checksum */ +#define SK_ERRBASE_SIRQ 600 /* Base Error number for Special IRQ */ +#define SK_ERRBASE_I2C 700 /* Base Error number for I2C module */ +#define SK_ERRBASE_QUEUE 800 /* Base Error number for Scheduler */ +#define SK_ERRBASE_ADDR 900 /* Base Error number for Address module */ #define SK_ERRBASE_PECP 1000 /* Base Error number for PECP */ #define SK_ERRBASE_DRV 1100 /* Base Error number for Driver */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgedrv.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgedrv.h --- linux.21pre4/drivers/net/sk98lin/h/skgedrv.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgedrv.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: skgedrv.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.4 $ - * Date: $Date: 1999/11/22 13:52:46 $ + * Version: $Revision: 1.6 $ + * Date: $Date: 2002/07/15 15:38:01 $ * Purpose: Interface with the driver * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,13 @@ * History: * * $Log: skgedrv.h,v $ + * Revision 1.6 2002/07/15 15:38:01 rschmidt + * Power Management support + * Editorial changes + * + * Revision 1.5 2002/04/25 11:05:47 rschmidt + * Editorial changes + * * Revision 1.4 1999/11/22 13:52:46 cgoos * Changed license header to GPL. * @@ -49,17 +55,18 @@ /* * Define the driver events. - * Usually the events are defined by the destination module. In case of the - * driver we put the definition of the events here. + * Usually the events are defined by the destination module. + * In case of the driver we put the definition of the events here. */ -#define SK_DRV_PORT_RESET 1 /* The port needs to be reset */ -#define SK_DRV_NET_UP 2 /* The net is now operational */ -#define SK_DRV_NET_DOWN 3 /* The net is now down */ -#define SK_DRV_SWITCH_SOFT 4 /* Ports switch with both links conn */ -#define SK_DRV_SWITCH_HARD 5 /* Port switch due to link failure */ -#define SK_DRV_RLMT_SEND 6 /* Send a RLMT packet */ -#define SK_DRV_ADAP_FAIL 7 /* The whole adapter fails */ -#define SK_DRV_PORT_FAIL 8 /* One port fails */ -#define SK_DRV_SWITCH_INTERN 9 /* Port switch from driver to itself */ +#define SK_DRV_PORT_RESET 1 /* The port needs to be reset */ +#define SK_DRV_NET_UP 2 /* The net is operational */ +#define SK_DRV_NET_DOWN 3 /* The net is down */ +#define SK_DRV_SWITCH_SOFT 4 /* Ports switch with both links connected */ +#define SK_DRV_SWITCH_HARD 5 /* Port switch due to link failure */ +#define SK_DRV_RLMT_SEND 6 /* Send a RLMT packet */ +#define SK_DRV_ADAP_FAIL 7 /* The whole adapter fails */ +#define SK_DRV_PORT_FAIL 8 /* One port fails */ +#define SK_DRV_SWITCH_INTERN 9 /* Port switch by the driver itself */ +#define SK_DRV_POWER_DOWN 10 /* Power down mode */ #endif /* __INC_SKGEDRV_H_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgehw.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgehw.h --- linux.21pre4/drivers/net/sk98lin/h/skgehw.h 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgehw.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: skgehw.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.36 $ - * Date: $Date: 2000/11/09 12:32:49 $ - * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product - * Family + * Version: $Revision: 1.48 $ + * Date: $Date: 2002/12/05 10:25:11 $ + * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2000 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +25,61 @@ * * History: * $Log: skgehw.h,v $ + * Revision 1.48 2002/12/05 10:25:11 rschmidt + * Added defines for Half Duplex Burst Mode On/Off + * Added defines for Rx GMAC FIFO Flush feature + * Editorial changes + * + * Revision 1.47 2002/11/12 17:01:31 rschmidt + * Added defines for WOL_CTL_DEFAULT + * Editorial changes + * + * Revision 1.46 2002/10/14 14:47:57 rschmidt + * Corrected bit mask for HW self test results + * Added defines for WOL Registers + * Editorial changes + * + * Revision 1.45 2002/10/11 09:25:22 mkarl + * Added bit mask for HW self test results. + * + * Revision 1.44 2002/08/16 14:44:36 rschmidt + * Added define GPC_HWCFG_GMII_FIB for YUKON Fiber + * + * Revision 1.43 2002/08/12 13:31:50 rschmidt + * Corrected macros for GMAC Address Registers: GM_INADDR(), + * GM_OUTADDR(), GM_INHASH, GM_OUTHASH. + * Editorial changes + * + * Revision 1.42 2002/08/08 15:37:56 rschmidt + * Added defines for Power Management Capabilities + * Editorial changes + * + * Revision 1.41 2002/07/23 16:02:25 rschmidt + * Added macro WOL_REG() to access WOL reg. (HW-Bug in YUKON 1st rev.) + * + * Revision 1.40 2002/07/15 15:41:37 rschmidt + * Added new defines for Power Management Cap. & Control + * Editorial changes + * + * Revision 1.39 2002/06/10 09:37:07 rschmidt + * Added macros for the ADDR-Modul + * + * Revision 1.38 2002/06/05 08:15:19 rschmidt + * Added defines for WOL Registers + * Editorial changes + * + * Revision 1.37 2002/04/25 11:39:23 rschmidt + * Added new defines for PCI Our Register 1 + * Added new registers and defines for YUKON (Rx FIFO, Tx FIFO, + * Time Stamp Timer, GMAC Control, GPHY Control,Link Control, + * GMAC IRQ Source and Mask, Wake-up Frame Pattern Match); + * Added new defines for Control/Status (VAUX available) + * Added Chip ID for YUKON + * Added define for descriptors with UDP ext. for YUKON + * Added macros to access the GMAC + * Added new Phy Type for Marvell 88E1011S (GPHY) + * Editorial changes + * * Revision 1.36 2000/11/09 12:32:49 rassmann * Renamed variables. * @@ -36,7 +90,7 @@ * Changed license header to GPL. * * Revision 1.33 1999/08/27 11:17:10 malthoff - * It's more savely to put bracket around marco parameters. + * It's more savely to put brackets around macro parameters. * Brackets added for PHY_READ and PHY_WRITE. * * Revision 1.32 1999/05/19 07:31:01 cgoos @@ -50,7 +104,7 @@ * Add PCI_ERRBITS. * * Revision 1.29 1999/01/26 08:55:48 malthoff - * Bugfix: The 16 bit field releations inside the descriptor are + * Bugfix: The 16 bit field relations inside the descriptor are * endianess dependend if the descriptor reversal feature * (PCI_REV_DESC bit in PCI_OUR_REG_2) is enabled. * Drivers which use this feature has to set the define @@ -91,7 +145,7 @@ * fix: typo B0_XM_IMSK regs * * Revision 1.18 1998/10/16 09:46:54 malthoff - * Remove temp defines for ML diag prototyp. + * Remove temp defines for ML diag prototype. * Fix register definition for B0_XM1_PHY_DATA, B0_XM1_PHY_DATA * B0_XM2_PHY_DATA, B0_XM2_PHY_ADDR, B0_XA1_CSR, B0_XS1_CSR, * B0_XS2_CSR, and B0_XA2_CSR. @@ -133,7 +187,7 @@ * * Revision 1.10 1998/09/02 11:16:39 malthoff * Temporary modify B2_I2C_SW to make tests with - * the GE/ML prototyp. + * the GE/ML prototype. 
* * Revision 1.9 1998/08/19 09:11:49 gklug * fix: struct are removed from c-source (see CCC) @@ -145,7 +199,7 @@ * * Revision 1.7 1998/07/03 14:42:26 malthoff * bug fix: Correct macro XMA(). - * Add temporary workaround to access the PCI config space over IO + * Add temporary workaround to access the PCI config space over I/O * * Revision 1.6 1998/06/23 11:30:36 malthoff * Remove ';' with ',' in macors. @@ -174,38 +228,120 @@ /* defines ********************************************************************/ +#define BIT_31 (1UL << 31) +#define BIT_30 (1L << 30) +#define BIT_29 (1L << 29) +#define BIT_28 (1L << 28) +#define BIT_27 (1L << 27) +#define BIT_26 (1L << 26) +#define BIT_25 (1L << 25) +#define BIT_24 (1L << 24) +#define BIT_23 (1L << 23) +#define BIT_22 (1L << 22) +#define BIT_21 (1L << 21) +#define BIT_20 (1L << 20) +#define BIT_19 (1L << 19) +#define BIT_18 (1L << 18) +#define BIT_17 (1L << 17) +#define BIT_16 (1L << 16) +#define BIT_15 (1L << 15) +#define BIT_14 (1L << 14) +#define BIT_13 (1L << 13) +#define BIT_12 (1L << 12) +#define BIT_11 (1L << 11) +#define BIT_10 (1L << 10) +#define BIT_9 (1L << 9) +#define BIT_8 (1L << 8) +#define BIT_7 (1L << 7) +#define BIT_6 (1L << 6) +#define BIT_5 (1L << 5) +#define BIT_4 (1L << 4) +#define BIT_3 (1L << 3) +#define BIT_2 (1L << 2) +#define BIT_1 (1L << 1) +#define BIT_0 1L + +#define BIT_15S (1U << 15) +#define BIT_14S (1 << 14) +#define BIT_13S (1 << 13) +#define BIT_12S (1 << 12) +#define BIT_11S (1 << 11) +#define BIT_10S (1 << 10) +#define BIT_9S (1 << 9) +#define BIT_8S (1 << 8) +#define BIT_7S (1 << 7) +#define BIT_6S (1 << 6) +#define BIT_5S (1 << 5) +#define BIT_4S (1 << 4) +#define BIT_3S (1 << 3) +#define BIT_2S (1 << 2) +#define BIT_1S (1 << 1) +#define BIT_0S 1 + +#define SHIFT31(x) ((x) << 31) +#define SHIFT30(x) ((x) << 30) +#define SHIFT29(x) ((x) << 29) +#define SHIFT28(x) ((x) << 28) +#define SHIFT27(x) ((x) << 27) +#define SHIFT26(x) ((x) << 26) +#define SHIFT25(x) ((x) << 25) +#define SHIFT24(x) ((x) << 24) +#define SHIFT23(x) ((x) << 23) +#define SHIFT22(x) ((x) << 22) +#define SHIFT21(x) ((x) << 21) +#define SHIFT20(x) ((x) << 20) +#define SHIFT19(x) ((x) << 19) +#define SHIFT18(x) ((x) << 18) +#define SHIFT17(x) ((x) << 17) +#define SHIFT16(x) ((x) << 16) +#define SHIFT15(x) ((x) << 15) +#define SHIFT14(x) ((x) << 14) +#define SHIFT13(x) ((x) << 13) +#define SHIFT12(x) ((x) << 12) +#define SHIFT11(x) ((x) << 11) +#define SHIFT10(x) ((x) << 10) +#define SHIFT9(x) ((x) << 9) +#define SHIFT8(x) ((x) << 8) +#define SHIFT7(x) ((x) << 7) +#define SHIFT6(x) ((x) << 6) +#define SHIFT5(x) ((x) << 5) +#define SHIFT4(x) ((x) << 4) +#define SHIFT3(x) ((x) << 3) +#define SHIFT2(x) ((x) << 2) +#define SHIFT1(x) ((x) << 1) +#define SHIFT0(x) ((x) << 0) + /* * Configuration Space header * Since this module is used for different OS', those may be * duplicate on some of them (e.g. Linux). But to keep the * common source, we have to live with this... 
*/ -#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */ -#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */ -#define PCI_COMMAND 0x04 /* 16 bit Command */ -#define PCI_STATUS 0x06 /* 16 bit Status */ -#define PCI_REV_ID 0x08 /* 8 bit Revision ID */ -#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */ -#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */ -#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */ -#define PCI_HEADER_T 0x0e /* 8 bit Header Type */ -#define PCI_BIST 0x0f /* 8 bit Built-in selftest */ -#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */ -#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */ - /* Byte 18..2b: reserved */ -#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */ -#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */ -#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */ - /* Byte 34..33: reserved */ +#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */ +#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */ +#define PCI_COMMAND 0x04 /* 16 bit Command */ +#define PCI_STATUS 0x06 /* 16 bit Status */ +#define PCI_REV_ID 0x08 /* 8 bit Revision ID */ +#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */ +#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */ +#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */ +#define PCI_HEADER_T 0x0e /* 8 bit Header Type */ +#define PCI_BIST 0x0f /* 8 bit Built-in selftest */ +#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */ +#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */ + /* Byte 0x18..0x2b: reserved */ +#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */ +#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */ +#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */ #define PCI_CAP_PTR 0x34 /* 8 bit Capabilities Ptr */ /* Byte 35..3b: reserved */ -#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */ -#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */ -#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */ -#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */ +#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */ +#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */ +#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */ +#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */ /* Device Dependent Region */ -#define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */ -#define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */ +#define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */ +#define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */ /* Power Management Region */ #define PCI_PM_CAP_ID 0x48 /* 8 bit Power Management Cap. ID */ #define PCI_PM_NITEM 0x49 /* 8 bit Next Item Ptr */ @@ -214,11 +350,11 @@ /* Byte 0x4e: reserved */ #define PCI_PM_DAT_REG 0x4f /* 8 bit Power Manag. Data Register */ /* VPD Region */ -#define PCI_VPD_CAP_ID 0x50 /* 8 bit VPD Cap. ID */ +#define PCI_VPD_CAP_ID 0x50 /* 8 bit VPD Cap. 
ID */ #define PCI_VPD_NITEM 0x51 /* 8 bit Next Item Ptr */ #define PCI_VPD_ADR_REG 0x52 /* 16 bit VPD Address Register */ #define PCI_VPD_DAT_REG 0x54 /* 32 bit VPD Data Register */ - /* Byte 58..ff: reserved */ + /* Byte 0x58..0xff: reserved */ /* * I2C Address (PCI Config) @@ -231,38 +367,38 @@ /* * Define Bits and Values of the registers */ -/* PCI_VENDOR_ID 16 bit Vendor ID */ -/* PCI_DEVICE_ID 16 bit Device ID */ -/* Values for Vendor ID and Device ID shall be patched into the code */ /* PCI_COMMAND 16 bit Command */ /* Bit 15..10: reserved */ -#define PCI_FBTEN (1<<9) /* Bit 9: Fast Back-To-Back enable */ -#define PCI_SERREN (1<<8) /* Bit 8: SERR enable */ -#define PCI_ADSTEP (1<<7) /* Bit 7: Address Stepping */ -#define PCI_PERREN (1<<6) /* Bit 6: Parity Report Response enable */ -#define PCI_VGA_SNOOP (1<<5) /* Bit 5: VGA palette snoop */ -#define PCI_MWIEN (1<<4) /* Bit 4: Memory write an inv cycl ena */ -#define PCI_SCYCEN (1<<3) /* Bit 3: Special Cycle enable */ -#define PCI_BMEN (1<<2) /* Bit 2: Bus Master enable */ -#define PCI_MEMEN (1<<1) /* Bit 1: Memory Space Access enable */ -#define PCI_IOEN (1<<0) /* Bit 0: IO Space Access enable */ +#define PCI_FBTEN BIT_9S /* Fast Back-To-Back enable */ +#define PCI_SERREN BIT_8S /* SERR enable */ +#define PCI_ADSTEP BIT_7S /* Address Stepping */ +#define PCI_PERREN BIT_6S /* Parity Report Response enable */ +#define PCI_VGA_SNOOP BIT_5S /* VGA palette snoop */ +#define PCI_MWIEN BIT_4S /* Memory write an inv cycl ena */ +#define PCI_SCYCEN BIT_3S /* Special Cycle enable */ +#define PCI_BMEN BIT_2S /* Bus Master enable */ +#define PCI_MEMEN BIT_1S /* Memory Space Access enable */ +#define PCI_IOEN BIT_0S /* I/O Space Access enable */ + +#define PCI_COMMAND_VAL (PCI_FBTEN | PCI_SERREN | PCI_PERREN | PCI_MWIEN |\ + PCI_BMEN | PCI_MEMEN | PCI_IOEN) /* PCI_STATUS 16 bit Status */ -#define PCI_PERR (1<<15) /* Bit 15: Parity Error */ -#define PCI_SERR (1<<14) /* Bit 14: Signaled SERR */ -#define PCI_RMABORT (1<<13) /* Bit 13: Received Master Abort */ -#define PCI_RTABORT (1<<12) /* Bit 12: Received Target Abort */ +#define PCI_PERR BIT_15S /* Parity Error */ +#define PCI_SERR BIT_14S /* Signaled SERR */ +#define PCI_RMABORT BIT_13S /* Received Master Abort */ +#define PCI_RTABORT BIT_12S /* Received Target Abort */ /* Bit 11: reserved */ -#define PCI_DEVSEL (3<<9) /* Bit 10..9: DEVSEL Timing */ -#define PCI_DEV_FAST (0<<9) /* fast */ -#define PCI_DEV_MEDIUM (1<<9) /* medium */ -#define PCI_DEV_SLOW (2<<9) /* slow */ -#define PCI_DATAPERR (1<<8) /* Bit 8: DATA Parity error detected */ -#define PCI_FB2BCAP (1<<7) /* Bit 7: Fast Back-to-Back Capability */ -#define PCI_UDF (1<<6) /* Bit 6: User Defined Features */ -#define PCI_66MHZCAP (1<<5) /* Bit 5: 66 MHz PCI bus clock capable */ -#define PCI_NEWCAP (1<<4) /* Bit 4: New cap. list implemented */ - /* Bit 3..0: reserved */ +#define PCI_DEVSEL (3<<9) /* Bit 10.. 9: DEVSEL Timing */ +#define PCI_DEV_FAST (0<<9) /* fast */ +#define PCI_DEV_MEDIUM (1<<9) /* medium */ +#define PCI_DEV_SLOW (2<<9) /* slow */ +#define PCI_DATAPERR BIT_8S /* DATA Parity error detected */ +#define PCI_FB2BCAP BIT_7S /* Fast Back-to-Back Capability */ +#define PCI_UDF BIT_6S /* User Defined Features */ +#define PCI_66MHZCAP BIT_5S /* 66 MHz PCI bus clock capable */ +#define PCI_NEWCAP BIT_4S /* New cap. list implemented */ + /* Bit 3.. 
0: reserved */ #define PCI_ERRBITS (PCI_PERR | PCI_SERR | PCI_RMABORT | PCI_RTABORT |\ PCI_DATAPERR) @@ -276,159 +412,167 @@ /* Possible values: 0,2,4,8,16,32,64,128 */ /* PCI_HEADER_T 8 bit Header Type */ -#define PCI_HD_MF_DEV (1<<7) /* Bit 7: 0= single, 1= multi-func dev */ -#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */ +#define PCI_HD_MF_DEV BIT_7S /* 0= single, 1= multi-func dev */ +#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */ /* PCI_BIST 8 bit Built-in selftest */ /* Built-in Self test not supported (optional) */ /* PCI_BASE_1ST 32 bit 1st Base address */ -#define PCI_MEMSIZE 0x4000L /* use 16 kB Memory Base */ -#define PCI_MEMBASE_MSK 0xffffc000L /* Bit 31..14: Memory Base Address */ -#define PCI_MEMSIZE_MSK 0x00003ff0L /* Bit 13.. 4: Memory Size Req. */ -#define PCI_PREFEN (1L<<3) /* Bit 3: Prefetchable */ -#define PCI_MEM_TYP (3L<<2) /* Bit 2.. 1: Memory Type */ -#define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */ -#define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */ -#define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */ -#define PCI_MEMSPACE (1L<<0) /* Bit 0: Memory Space Indic. */ +#define PCI_MEMSIZE 0x4000L /* use 16 kB Memory Base */ +#define PCI_MEMBASE_MSK 0xffffc000L /* Bit 31..14: Memory Base Address */ +#define PCI_MEMSIZE_MSK 0x00003ff0L /* Bit 13.. 4: Memory Size Req. */ +#define PCI_PREFEN BIT_3 /* Prefetchable */ +#define PCI_MEM_TYP (3L<<2) /* Bit 2.. 1: Memory Type */ +#define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */ +#define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */ +#define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */ +#define PCI_MEMSPACE BIT_0 /* Memory Space Indic. */ /* PCI_BASE_2ND 32 bit 2nd Base address */ -#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */ -#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */ +#define PCI_IOBASE 0xffffff00L /* Bit 31.. 8: I/O Base address */ +#define PCI_IOSIZE 0x000000fcL /* Bit 7.. 2: I/O Size Requirements */ /* Bit 1: reserved */ -#define PCI_IOSPACE (1L<<0) /* Bit 0: I/O Space Indicator */ +#define PCI_IOSPACE BIT_0 /* I/O Space Indicator */ /* PCI_BASE_ROM 32 bit Expansion ROM Base Address */ -#define PCI_ROMBASE (0xfffeL<<17) /* Bit 31..17: ROM BASE address (1st)*/ -#define PCI_ROMBASZ (0x1cL<<14) /* Bit 16..14: Treat as BASE or SIZE */ -#define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */ +#define PCI_ROMBASE 0xfffe0000L /* Bit 31..17: ROM BASE address (1st)*/ +#define PCI_ROMBASZ (0x1cL<<14) /* Bit 16..14: Treat as BASE or SIZE */ +#define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */ /* Bit 10.. 1: reserved */ -#define PCI_ROMEN (0x1L<<0) /* Bit 0: Address Decode enable */ +#define PCI_ROMEN BIT_0 /* Address Decode enable */ /* Device Dependent Region */ /* PCI_OUR_REG_1 32 bit Our Register 1 */ - /* Bit 31..26: reserved */ -#define PCI_VIO (1L<<25) /* Bit 25: PCI IO Voltage, */ - /* 0 = 3.3V / 1 = 5V */ -#define PCI_EN_BOOT (1L<<24) /* Bit 24: Enable BOOT via ROM */ - /* 1 = Don't boot wth ROM*/ - /* 0 = Boot with ROM */ -#define PCI_EN_IO (1L<<23) /* Bit 23: Mapping to IO space */ -#define PCI_EN_FPROM (1L<<22) /* Bit 22: FLASH mapped to mem? */ + /* Bit 31..29: reserved */ +#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode */ +#define PCI_EN_CAL BIT_27 /* Enable PCI buffer strength calibr. */ +#define PCI_DIS_CAL BIT_26 /* Disable PCI buffer strength calibr. 
*/ +#define PCI_VIO BIT_25 /* PCI I/O Voltage, 0 = 3.3V, 1 = 5V */ +#define PCI_EN_BOOT BIT_24 /* Enable BOOT via ROM */ +#define PCI_EN_IO BIT_23 /* Mapping to I/O space */ +#define PCI_EN_FPROM BIT_22 /* FLASH mapped to mem? */ /* 1 = Map Flash to Mem */ /* 0 = Disable addr. dec*/ -#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */ -#define PCI_PAGE_16 (0L<<20) /* 16 k pages */ -#define PCI_PAGE_32K (1L<<20) /* 32 k pages */ -#define PCI_PAGE_64K (2L<<20) /* 64 k pages */ -#define PCI_PAGE_128K (3L<<20) /* 128 k pages */ +#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */ +#define PCI_PAGE_16 (0L<<20) /* 16 k pages */ +#define PCI_PAGE_32K (1L<<20) /* 32 k pages */ +#define PCI_PAGE_64K (2L<<20) /* 64 k pages */ +#define PCI_PAGE_128K (3L<<20) /* 128 k pages */ /* Bit 19: reserved */ -#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */ -#define PCI_NOTAR (1L<<15) /* Bit 15: No turnaround cycle */ -#define PCI_FORCE_BE (1L<<14) /* Bit 14: Assert all BEs on MR */ -#define PCI_DIS_MRL (1L<<13) /* Bit 13: Disable Mem R Line */ -#define PCI_DIS_MRM (1L<<12) /* Bit 12: Disable Mem R multip */ -#define PCI_DIS_MWI (1L<<11) /* Bit 11: Disable Mem W & inv */ -#define PCI_DISC_CLS (1L<<10) /* Bit 10: Disc: cacheLsz bound */ -#define PCI_BURST_DIS (1L<<9) /* Bit 9: Burst Disable */ -#define PCI_DIS_PCI_CLK (1L<<8) /* Bit 8: Disable PCI clock driv*/ -#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7..4: Skew Ctrl, DAS Ext */ -#define PCI_SKEW_BASE (0xfL<<0) /* Bit 3..0: Skew Ctrl, Base */ +#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */ +#define PCI_NOTAR BIT_15 /* No turnaround cycle */ +#define PCI_FORCE_BE BIT_14 /* Assert all BEs on MR */ +#define PCI_DIS_MRL BIT_13 /* Disable Mem Read Line */ +#define PCI_DIS_MRM BIT_12 /* Disable Mem Read Multiple */ +#define PCI_DIS_MWI BIT_11 /* Disable Mem Write & Invalidate */ +#define PCI_DISC_CLS BIT_10 /* Disc: cacheLsz bound */ +#define PCI_BURST_DIS BIT_9 /* Burst Disable */ +#define PCI_DIS_PCI_CLK BIT_8 /* Disable PCI clock driving */ +#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7.. 4: Skew Ctrl, DAS Ext */ +#define PCI_SKEW_BASE 0xfL /* Bit 3.. 0: Skew Ctrl, Base */ /* PCI_OUR_REG_2 32 bit Our Register 2 */ #define PCI_VPD_WR_THR (0xffL<<24) /* Bit 31..24: VPD Write Threshold */ -#define PCI_DEV_SEL (0x7fL<<17) /* Bit 23..17: EEPROM Device Select */ -#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 16..14: VPD ROM Size */ +#define PCI_DEV_SEL (0x7fL<<17) /* Bit 23..17: EEPROM Device Select */ +#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 16..14: VPD ROM Size */ /* Bit 13..12: reserved */ -#define PCI_PATCH_DIR (0xfL<<8) /* Bit 11.. 8: Ext Patchs dir 3..0 */ -#define PCI_PATCH_DIR_0 (1L<<8) -#define PCI_PATCH_DIR_1 (1L<<9) -#define PCI_PATCH_DIR_2 (1L<<10) -#define PCI_PATCH_DIR_3 (1L<<11) -#define PCI_EXT_PATCHS (0xfL<<4) /* Bit 7..4: Extended Patches 3..0 */ -#define PCI_EXT_PATCH_0 (1L<<4) -#define PCI_EXT_PATCH_1 (1L<<5) -#define PCI_EXT_PATCH_2 (1L<<6) -#define PCI_EXT_PATCH_3 (1L<<7) -#define PCI_EN_DUMMY_RD (1L<<3) /* Bit 3: Enable Dummy Read */ -#define PCI_REV_DESC (1L<<2) /* Bit 2: Reverse Desc. Bytes */ +#define PCI_PATCH_DIR (0xfL<<8) /* Bit 11.. 8: Ext Patches dir 3..0 */ +#define PCI_PATCH_DIR_3 BIT_11 +#define PCI_PATCH_DIR_2 BIT_10 +#define PCI_PATCH_DIR_1 BIT_9 +#define PCI_PATCH_DIR_0 BIT_8 +#define PCI_EXT_PATCHS (0xfL<<4) /* Bit 7.. 
4: Extended Patches 3..0 */ +#define PCI_EXT_PATCH_3 BIT_7 +#define PCI_EXT_PATCH_2 BIT_6 +#define PCI_EXT_PATCH_1 BIT_5 +#define PCI_EXT_PATCH_0 BIT_4 +#define PCI_EN_DUMMY_RD BIT_3 /* Enable Dummy Read */ +#define PCI_REV_DESC BIT_2 /* Reverse Desc. Bytes */ /* Bit 1: reserved */ -#define PCI_USEDATA64 (1L<<0) /* Bit 0: Use 64Bit Data bus ext*/ +#define PCI_USEDATA64 BIT_0 /* Use 64Bit Data bus ext */ /* Power Management Region */ /* PCI_PM_CAP_REG 16 bit Power Management Capabilities */ -#define PCI_PME_SUP (0x1f<<11) /* Bit 15..11: PM Manag. Event Sup */ -#define PCI_PM_D2_SUB (1<<10) /* Bit 10: D2 Support Bit */ -#define PCI_PM_D1_SUB (1<<9) /* Bit 9: D1 Support Bit */ - /* Bit 8..6: reserved */ -#define PCI_PM_DSI (1<<5) /* Bit 5: Device Specific Init.*/ -#define PCI_PM_APS (1<<4) /* Bit 4: Auxialiary Power Src */ -#define PCI_PME_CLOCK (1<<3) /* Bit 3: PM Event Clock */ -#define PCI_PM_VER (7<<0) /* Bit 2..0: PM PCI Spec. version */ - -/* PCI_PM_CTL_STS 16 bit Power Manag. Control/Status */ -#define PCI_PME_STATUS (1<<15) /* Bit 15: PGA doesn't sup. PME# */ -#define PCI_PM_DAT_SCL (3<<13) /* Bit 14..13: dat reg Scaling factor*/ -#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 12.. 9: PM data selector field*/ -#define PCI_PME_EN (1<<8) /* Bit 8: PGA doesn't sup. PME# */ +#define PCI_PME_SUP (0x1f<<11) /* Bit 15..11: PM Event Support */ +#define PCI_PME_D3C_SUP BIT_15S /* PME from D3cold Support (if Vaux) */ +#define PCI_PME_D3H_SUP BIT_14S /* PME from D3hot Support */ +#define PCI_PME_D2_SUP BIT_13S /* PME from D2 Support */ +#define PCI_PME_D1_SUP BIT_12S /* PME from D1 Support */ +#define PCI_PME_D0_SUP BIT_11S /* PME from D0 Support */ +#define PCI_PM_D2_SUP BIT_10S /* D2 Support in 33 MHz mode */ +#define PCI_PM_D1_SUP BIT_9S /* D1 Support */ + /* Bit 8.. 6: reserved */ +#define PCI_PM_DSI BIT_5S /* Device Specific Initialization */ +#define PCI_PM_APS BIT_4S /* Auxialiary Power Source */ +#define PCI_PME_CLOCK BIT_3S /* PM Event Clock */ +#define PCI_PM_VER_MSK 7 /* Bit 2.. 0: PM PCI Spec. version */ + +/* PCI_PM_CTL_STS 16 bit Power Management Control/Status */ +#define PCI_PME_STATUS BIT_15S /* PME Status (YUKON only) */ +#define PCI_PM_DAT_SCL (3<<13) /* Bit 14..13: Data Reg. scaling factor */ +#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 12.. 9: PM data selector field */ +#define PCI_PME_EN BIT_8S /* Enable PME# generation (YUKON only) */ /* Bit 7.. 2: reserved */ -#define PCI_PM_STATE (3<<0) /* Bit 1.. 0: Power Management State*/ -#define PCI_PM_STATE_D0 (0<<0) /* D0: Operational (default) */ -#define PCI_PM_STATE_D1 (1<<0) /* D1: not supported */ -#define PCI_PM_STATE_D2 (2<<0) /* D2: not supported */ -#define PCI_PM_STATE_D3 (3<<0) /* D3: HOT, Power Down and Reset */ +#define PCI_PM_STATE_MSK 3 /* Bit 1.. 0: Power Management State */ + +#define PCI_PM_STATE_D0 0 /* D0: Operational (default) */ +#define PCI_PM_STATE_D1 1 /* D1: (YUKON only) */ +#define PCI_PM_STATE_D2 2 /* D2: (YUKON only) */ +#define PCI_PM_STATE_D3 3 /* D3: HOT, Power Down and Reset */ /* VPD Region */ /* PCI_VPD_ADR_REG 16 bit VPD Address Register */ -#define PCI_VPD_FLAG (1L<<15) /* Bit 15: starts VPD rd/wd cycle*/ +#define PCI_VPD_FLAG BIT_15S /* starts VPD rd/wd cycle*/ +#define PCI_VPD_ADDR 0x3fffL /* Bit 14.. 
0: VPD address */ + +/* Control Register File (Address Map) */ /* - * Control Register File: * Bank 0 */ -#define B0_RAP 0x0000 /* 8 bit Register Address Port */ +#define B0_RAP 0x0000 /* 8 bit Register Address Port */ /* 0x0001 - 0x0003: reserved */ -#define B0_CTST 0x0004 /* 16 bit Control/Status register */ -#define B0_LED 0x0006 /* 8 Bit LED register */ - /* 0x0007: reserved */ -#define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */ -#define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */ +#define B0_CTST 0x0004 /* 16 bit Control/Status register */ +#define B0_LED 0x0006 /* 8 Bit LED register */ +#define B0_POWER_CTRL 0x0007 /* 8 Bit Power Control reg (YUKON only) */ +#define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */ +#define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */ #define B0_HWE_ISRC 0x0010 /* 32 bit HW Error Interrupt Src Reg */ #define B0_HWE_IMSK 0x0014 /* 32 bit HW Error Interrupt Mask Reg */ #define B0_SP_ISRC 0x0018 /* 32 bit Special Interrupt Source Reg */ /* 0x001c: reserved */ -/* B0 XMAC 1 registers */ -#define B0_XM1_IMSK 0x0020 /* 16 bit r/w XMAC 1 Interrupt Mask Register*/ - /* 0x0022 - 0x0027 reserved */ +/* B0 XMAC 1 registers (GENESIS only) */ +#define B0_XM1_IMSK 0x0020 /* 16 bit r/w XMAC 1 Interrupt Mask Register*/ + /* 0x0022 - 0x0027: reserved */ #define B0_XM1_ISRC 0x0028 /* 16 bit ro XMAC 1 Interrupt Status Reg */ - /* 0x002a - 0x002f reserved */ + /* 0x002a - 0x002f: reserved */ #define B0_XM1_PHY_ADDR 0x0030 /* 16 bit r/w XMAC 1 PHY Address Register */ - /* 0x0032 - 0x0033 reserved */ + /* 0x0032 - 0x0033: reserved */ #define B0_XM1_PHY_DATA 0x0034 /* 16 bit r/w XMAC 1 PHY Data Register */ - /* 0x0036 - 0x003f reserved */ + /* 0x0036 - 0x003f: reserved */ -/* B0 XMAC 2 registers */ -#define B0_XM2_IMSK 0x0040 /* 16 bit r/w XMAC 2 Interrupt Mask Register*/ - /* 0x0042 - 0x0047 reserved */ +/* B0 XMAC 2 registers (GENESIS only) */ +#define B0_XM2_IMSK 0x0040 /* 16 bit r/w XMAC 2 Interrupt Mask Register*/ + /* 0x0042 - 0x0047: reserved */ #define B0_XM2_ISRC 0x0048 /* 16 bit ro XMAC 2 Interrupt Status Reg */ - /* 0x004a - 0x004f reserved */ + /* 0x004a - 0x004f: reserved */ #define B0_XM2_PHY_ADDR 0x0050 /* 16 bit r/w XMAC 2 PHY Address Register */ - /* 0x0052 - 0x0053 reserved */ + /* 0x0052 - 0x0053: reserved */ #define B0_XM2_PHY_DATA 0x0054 /* 16 bit r/w XMAC 2 PHY Data Register */ - /* 0x0056 - 0x005f reserved */ + /* 0x0056 - 0x005f: reserved */ /* BMU Control Status Registers */ -#define B0_R1_CSR 0x0060 /* 32 bit BMU Ctrl/Stat Rx Queue 1 */ -#define B0_R2_CSR 0x0064 /* 32 bit BMU Ctrl/Stat Rx Queue 2 */ -#define B0_XS1_CSR 0x0068 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ -#define B0_XA1_CSR 0x006c /* 32 bit BMU Ctrl/Stat Async Tx Queue 1*/ -#define B0_XS2_CSR 0x0070 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ -#define B0_XA2_CSR 0x0074 /* 32 bit BMU Ctrl/Stat Async Tx Queue 2*/ - /* x0078 - 0x007f reserved */ +#define B0_R1_CSR 0x0060 /* 32 bit BMU Ctrl/Stat Rx Queue 1 */ +#define B0_R2_CSR 0x0064 /* 32 bit BMU Ctrl/Stat Rx Queue 2 */ +#define B0_XS1_CSR 0x0068 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +#define B0_XA1_CSR 0x006c /* 32 bit BMU Ctrl/Stat Async Tx Queue 1*/ +#define B0_XS2_CSR 0x0070 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +#define B0_XA2_CSR 0x0074 /* 32 bit BMU Ctrl/Stat Async Tx Queue 2*/ + /* 0x0078 - 0x007f: reserved */ /* * Bank 1 @@ -440,20 +584,19 @@ * Bank 2 */ /* NA reg = 48 bit Network Address Register, 3x16 or 8x8 bit readable */ - -#define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */ - /* 
0x0106 - 0x0107 reserved */ -#define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */ - /* 0x010e - 0x010f reserved */ -#define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */ - /* 0x0116 - 0x0117 reserved */ +#define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */ + /* 0x0106 - 0x0107: reserved */ +#define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */ + /* 0x010e - 0x010f: reserved */ +#define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */ + /* 0x0116 - 0x0117: reserved */ #define B2_CONN_TYP 0x0118 /* 8 bit Connector type */ #define B2_PMD_TYP 0x0119 /* 8 bit PMD type */ -#define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration */ -#define B2_CHIP_REV 0x011b /* 8 bit Queen Chip Revision Number */ +#define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration / Chip Revision */ +#define B2_CHIP_ID 0x011b /* 8 bit Chip Identification Number */ /* Eprom registers are currently of no use */ -#define B2_E_0 0x011c /* 8 bit EPROM Byte 0 */ -#define B2_E_1 0x011d /* 8 bit EPROM Byte 1 */ +#define B2_E_0 0x011c /* 8 bit EPROM Byte 0 (ext. SRAM size */ +#define B2_E_1 0x011d /* 8 bit EPROM Byte 1 (PHY type) */ #define B2_E_2 0x011e /* 8 bit EPROM Byte 2 */ #define B2_E_3 0x011f /* 8 bit EPROM Byte 3 */ #define B2_FAR 0x0120 /* 32 bit Flash-Prom Addr Reg/Cnt */ @@ -462,9 +605,9 @@ #define B2_LD_CRTL 0x0128 /* 8 bit EPROM loader control register */ #define B2_LD_TEST 0x0129 /* 8 bit EPROM loader test register */ /* 0x012a - 0x012f: reserved */ -#define B2_TI_INI 0x0130 /* 32 bit Timer init value */ -#define B2_TI_VAL 0x0134 /* 32 bit Timer value */ -#define B2_TI_CRTL 0x0138 /* 8 bit Timer control */ +#define B2_TI_INI 0x0130 /* 32 bit Timer Init Value */ +#define B2_TI_VAL 0x0134 /* 32 bit Timer Value */ +#define B2_TI_CRTL 0x0138 /* 8 bit Timer Control */ #define B2_TI_TEST 0x0139 /* 8 Bit Timer Test */ /* 0x013a - 0x013f: reserved */ #define B2_IRQM_INI 0x0140 /* 32 bit IRQ Moderation Timer Init Reg.*/ @@ -474,14 +617,16 @@ #define B2_IRQM_MSK 0x014c /* 32 bit IRQ Moderation Mask */ #define B2_IRQM_HWE_MSK 0x0150 /* 32 bit IRQ Moderation HW Error Mask */ /* 0x0154 - 0x0157: reserved */ -#define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */ +#define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */ #define B2_TST_CTRL2 0x0159 /* 8 bit Test Control Register 2 */ /* 0x015a - 0x015b: reserved */ -#define B2_GP_IO 0x015c /* 32 bit General Purpose IO Register */ +#define B2_GP_IO 0x015c /* 32 bit General Purpose I/O Register */ #define B2_I2C_CTRL 0x0160 /* 32 bit I2C HW Control Register */ #define B2_I2C_DATA 0x0164 /* 32 bit I2C HW Data Register */ #define B2_I2C_IRQ 0x0168 /* 32 bit I2C HW IRQ Register */ #define B2_I2C_SW 0x016c /* 32 bit I2C SW Port Register */ + +/* Blink Source Counter (GENESIS only) */ #define B2_BSC_INI 0x0170 /* 32 bit Blink Source Counter Init Val */ #define B2_BSC_VAL 0x0174 /* 32 bit Blink Source Counter Value */ #define B2_BSC_CTRL 0x0178 /* 8 bit Blink Source Counter Control */ @@ -492,66 +637,70 @@ /* * Bank 3 */ +/* RAM Random Registers */ #define B3_RAM_ADDR 0x0180 /* 32 bit RAM Address, to read or write */ #define B3_RAM_DATA_LO 0x0184 /* 32 bit RAM Data Word (low dWord) */ #define B3_RAM_DATA_HI 0x0188 /* 32 bit RAM Data Word (high dWord) */ /* 0x018c - 0x018f: reserved */ + /* RAM Interface Registers */ /* - * The HW-Spec. call this registers Timeout Value 0..11. But this names are + * The HW-Spec. calls this registers Timeout Value 0..11. But this names are * not usable in SW. Please notice these are NOT real timeouts, these are - * the number of qWords transfered continously. 
+ * the number of qWords transferred continuously. */ -#define B3_RI_WTO_R1 0x0190 /* 8 bit RAM Iface WR Timeout Queue R1 (TO0) */ -#define B3_RI_WTO_XA1 0x0191 /* 8 bit RAM Iface WR Timeout Queue XA1 (TO1) */ -#define B3_RI_WTO_XS1 0x0192 /* 8 bit RAM Iface WR Timeout Queue XS1 (TO2) */ -#define B3_RI_RTO_R1 0x0193 /* 8 bit RAM Iface RD Timeout Queue R1 (TO3) */ -#define B3_RI_RTO_XA1 0x0194 /* 8 bit RAM Iface RD Timeout Queue XA1 (TO4) */ -#define B3_RI_RTO_XS1 0x0195 /* 8 bit RAM Iface RD Timeout Queue XS1 (TO5) */ -#define B3_RI_WTO_R2 0x0196 /* 8 bit RAM Iface WR Timeout Queue R2 (TO6) */ -#define B3_RI_WTO_XA2 0x0197 /* 8 bit RAM Iface WR Timeout Queue XA2 (TO7) */ -#define B3_RI_WTO_XS2 0x0198 /* 8 bit RAM Iface WR Timeout Queue XS2 (TO8) */ -#define B3_RI_RTO_R2 0x0199 /* 8 bit RAM Iface RD Timeout Queue R2 (TO9) */ -#define B3_RI_RTO_XA2 0x019a /* 8 bit RAM Iface RD Timeout Queue XA2 (TO10)*/ -#define B3_RI_RTO_XS2 0x019b /* 8 bit RAM Iface RD Timeout Queue XS2 (TO11)*/ -#define B3_RI_TO_VAL 0x019c /* 8 bit RAM Iface Current Timeout Count Val */ - /* 0x019d - 0x019f reserved */ -#define B3_RI_CTRL 0x01a0 /* 16 bit RAM Iface Control Register */ -#define B3_RI_TEST 0x01a2 /* 8 bit RAM Iface Test Register */ - /* 0x01a3 - 0x01af reserved */ -/* MAC Arbiter Registers */ -/* Please notice these are the number of qWord tranfered continously and */ -/* NOT real timeouts */ -#define B3_MA_TOINI_RX1 0x01b0 /* 8 bit Timeout Init Value Rx Path MAC 1 */ -#define B3_MA_TOINI_RX2 0x01b1 /* 8 bit Timeout Init Value Rx Path MAC 2 */ -#define B3_MA_TOINI_TX1 0x01b2 /* 8 bit Timeout Init Value Tx Path MAC 1 */ -#define B3_MA_TOINI_TX2 0x01b3 /* 8 bit Timeout Init Value Tx Path MAC 2 */ -#define B3_MA_TOVAL_RX1 0x01b4 /* 8 bit Timeout Value Rx Path MAC 1 */ -#define B3_MA_TOVAL_RX2 0x01b5 /* 8 bit Timeout Value Rx Path MAC 1 */ -#define B3_MA_TOVAL_TX1 0x01b6 /* 8 bit Timeout Value Tx Path MAC 2 */ -#define B3_MA_TOVAL_TX2 0x01b7 /* 8 bit Timeout Value Tx Path MAC 2 */ +#define B3_RI_WTO_R1 0x0190 /* 8 bit WR Timeout Queue R1 (TO0) */ +#define B3_RI_WTO_XA1 0x0191 /* 8 bit WR Timeout Queue XA1 (TO1) */ +#define B3_RI_WTO_XS1 0x0192 /* 8 bit WR Timeout Queue XS1 (TO2) */ +#define B3_RI_RTO_R1 0x0193 /* 8 bit RD Timeout Queue R1 (TO3) */ +#define B3_RI_RTO_XA1 0x0194 /* 8 bit RD Timeout Queue XA1 (TO4) */ +#define B3_RI_RTO_XS1 0x0195 /* 8 bit RD Timeout Queue XS1 (TO5) */ +#define B3_RI_WTO_R2 0x0196 /* 8 bit WR Timeout Queue R2 (TO6) */ +#define B3_RI_WTO_XA2 0x0197 /* 8 bit WR Timeout Queue XA2 (TO7) */ +#define B3_RI_WTO_XS2 0x0198 /* 8 bit WR Timeout Queue XS2 (TO8) */ +#define B3_RI_RTO_R2 0x0199 /* 8 bit RD Timeout Queue R2 (TO9) */ +#define B3_RI_RTO_XA2 0x019a /* 8 bit RD Timeout Queue XA2 (TO10)*/ +#define B3_RI_RTO_XS2 0x019b /* 8 bit RD Timeout Queue XS2 (TO11)*/ +#define B3_RI_TO_VAL 0x019c /* 8 bit Current Timeout Count Val */ + /* 0x019d - 0x019f: reserved */ +#define B3_RI_CTRL 0x01a0 /* 16 bit RAM Interface Control Register */ +#define B3_RI_TEST 0x01a2 /* 8 bit RAM Interface Test Register */ + /* 0x01a3 - 0x01af: reserved */ + +/* MAC Arbiter Registers (GENESIS only) */ +/* these are the no. 
of qWord transferred continuously and NOT real timeouts */ +#define B3_MA_TOINI_RX1 0x01b0 /* 8 bit Timeout Init Val Rx Path MAC 1 */ +#define B3_MA_TOINI_RX2 0x01b1 /* 8 bit Timeout Init Val Rx Path MAC 2 */ +#define B3_MA_TOINI_TX1 0x01b2 /* 8 bit Timeout Init Val Tx Path MAC 1 */ +#define B3_MA_TOINI_TX2 0x01b3 /* 8 bit Timeout Init Val Tx Path MAC 2 */ +#define B3_MA_TOVAL_RX1 0x01b4 /* 8 bit Timeout Value Rx Path MAC 1 */ +#define B3_MA_TOVAL_RX2 0x01b5 /* 8 bit Timeout Value Rx Path MAC 1 */ +#define B3_MA_TOVAL_TX1 0x01b6 /* 8 bit Timeout Value Tx Path MAC 2 */ +#define B3_MA_TOVAL_TX2 0x01b7 /* 8 bit Timeout Value Tx Path MAC 2 */ #define B3_MA_TO_CTRL 0x01b8 /* 16 bit MAC Arbiter Timeout Ctrl Reg */ #define B3_MA_TO_TEST 0x01ba /* 16 bit MAC Arbiter Timeout Test Reg */ - /* 0x01bc - 0x01bf reserved */ -#define B3_MA_RCINI_RX1 0x01c0 /* 8 bit Recovery Init Value Rx Path MAC 1 */ -#define B3_MA_RCINI_RX2 0x01c1 /* 8 bit Recovery Init Value Rx Path MAC 2 */ -#define B3_MA_RCINI_TX1 0x01c2 /* 8 bit Recovery Init Value Tx Path MAC 1 */ -#define B3_MA_RCINI_TX2 0x01c3 /* 8 bit Recovery Init Value Tx Path MAC 2 */ -#define B3_MA_RCVAL_RX1 0x01c4 /* 8 bit Recovery Value Rx Path MAC 1 */ -#define B3_MA_RCVAL_RX2 0x01c5 /* 8 bit Recovery Value Rx Path MAC 1 */ -#define B3_MA_RCVAL_TX1 0x01c6 /* 8 bit Recovery Value Tx Path MAC 2 */ -#define B3_MA_RCVAL_TX2 0x01c7 /* 8 bit Recovery Value Tx Path MAC 2 */ + /* 0x01bc - 0x01bf: reserved */ +#define B3_MA_RCINI_RX1 0x01c0 /* 8 bit Recovery Init Val Rx Path MAC 1 */ +#define B3_MA_RCINI_RX2 0x01c1 /* 8 bit Recovery Init Val Rx Path MAC 2 */ +#define B3_MA_RCINI_TX1 0x01c2 /* 8 bit Recovery Init Val Tx Path MAC 1 */ +#define B3_MA_RCINI_TX2 0x01c3 /* 8 bit Recovery Init Val Tx Path MAC 2 */ +#define B3_MA_RCVAL_RX1 0x01c4 /* 8 bit Recovery Value Rx Path MAC 1 */ +#define B3_MA_RCVAL_RX2 0x01c5 /* 8 bit Recovery Value Rx Path MAC 1 */ +#define B3_MA_RCVAL_TX1 0x01c6 /* 8 bit Recovery Value Tx Path MAC 2 */ +#define B3_MA_RCVAL_TX2 0x01c7 /* 8 bit Recovery Value Tx Path MAC 2 */ #define B3_MA_RC_CTRL 0x01c8 /* 16 bit MAC Arbiter Recovery Ctrl Reg */ #define B3_MA_RC_TEST 0x01ca /* 16 bit MAC Arbiter Recovery Test Reg */ - /* 0x01cc - 0x01cf reserved */ -/* Packet Arbiter Registers, This are real timeouts */ -#define B3_PA_TOINI_RX1 0x01d0 /* 16 bit Timeout Init Val Rx Path MAC 1*/ + /* 0x01cc - 0x01cf: reserved */ + +/* Packet Arbiter Registers (GENESIS only) */ +/* these are real timeouts */ +#define B3_PA_TOINI_RX1 0x01d0 /* 16 bit Timeout Init Val Rx Path MAC 1 */ /* 0x01d2 - 0x01d3: reserved */ -#define B3_PA_TOINI_RX2 0x01d4 /* 16 bit Timeout Init Val Rx Path MAC 2*/ +#define B3_PA_TOINI_RX2 0x01d4 /* 16 bit Timeout Init Val Rx Path MAC 2 */ /* 0x01d6 - 0x01d7: reserved */ -#define B3_PA_TOINI_TX1 0x01d8 /* 16 bit Timeout Init Val Tx Path MAC 1*/ +#define B3_PA_TOINI_TX1 0x01d8 /* 16 bit Timeout Init Val Tx Path MAC 1 */ /* 0x01da - 0x01db: reserved */ -#define B3_PA_TOINI_TX2 0x01dc /* 16 bit Timeout Init Val Tx Path MAC 2*/ +#define B3_PA_TOINI_TX2 0x01dc /* 16 bit Timeout Init Val Tx Path MAC 2 */ /* 0x01de - 0x01df: reserved */ #define B3_PA_TOVAL_RX1 0x01e0 /* 16 bit Timeout Val Rx Path MAC 1 */ /* 0x01e2 - 0x01e3: reserved */ @@ -568,8 +717,7 @@ /* * Bank 4 - 5 */ - -/* Transmit Arbiter Registers MAC 1 and 2, user MR_ADDR() to address */ +/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ #define TXA_ITI_INI 0x0200 /* 32 bit Tx Arb Interval Timer Init Val*/ #define TXA_ITI_VAL 0x0204 /* 32 bit Tx Arb Interval Timer 
Value */ #define TXA_LIM_INI 0x0208 /* 32 bit Tx Arb Limit Counter Init Val */ @@ -578,11 +726,13 @@ #define TXA_TEST 0x0211 /* 8 bit Tx Arbiter Test Register */ #define TXA_STAT 0x0212 /* 8 bit Tx Arbiter Status Register */ /* 0x0213 - 0x027f: reserved */ + /* 0x0280 - 0x0292: MAC 2 */ + /* 0x0213 - 0x027f: reserved */ /* * Bank 6 */ -/* External registers */ +/* External registers (GENESIS only) */ #define B6_EXT_REG 0x0300 /* @@ -595,7 +745,7 @@ * Bank 8 - 15 */ /* Receive and Transmit Queue Registers, use Q_ADDR() to access */ -#define B8_Q_REGS 0x0400 +#define B8_Q_REGS 0x0400 /* Queue Register Offsets, use Q_ADDR() to access */ #define Q_D 0x00 /* 8*32 bit Current Descriptor */ @@ -619,254 +769,371 @@ * Bank 16 - 23 */ /* RAM Buffer Registers */ -#define B16_RAM_REGS 0x0800 +#define B16_RAM_REGS 0x0800 -/* RAM Buffer Register Offsets */ -/* use RB_ADDR(Queue,Offs) to address */ -#define RB_START 0x00 /* 32 bit RAM Buffer Start Address */ -#define RB_END 0x04 /* 32 bit RAM Buffer End Address */ -#define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */ -#define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */ -#define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Pack*/ -#define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Pack*/ -#define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */ -#define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */ - /* 0x10 - 0x1f: reserved for Tx RAM Buffer Registers */ -#define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */ -#define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */ -#define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */ -#define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */ -#define RB_TST2 0x2A /* 8 bit RAM Buffer Test Register 2 */ - /* 0x2c - 0x7f: reserved */ +/* RAM Buffer Register Offsets, use RB_ADDR() to access */ +#define RB_START 0x00 /* 32 bit RAM Buffer Start Address */ +#define RB_END 0x04 /* 32 bit RAM Buffer End Address */ +#define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */ +#define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */ +#define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Pack */ +#define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Pack */ +#define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */ +#define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ +#define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */ +#define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */ +#define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */ +#define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */ +#define RB_TST2 0x2A /* 8 bit RAM Buffer Test Register 2 */ + /* 0x2c - 0x7f: reserved */ /* - * Bank 24 - 25 + * Bank 24 + */ +/* + * Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) + * use MR_ADDR() to access */ -/* Receive MAC FIFO, Receive LED, and Link Sync regs, use MR_ADDR() to address*/ #define RX_MFF_EA 0x0c00 /* 32 bit Receive MAC FIFO End Address */ -#define RX_MFF_WP 0x0c04 /* 32 bit Receive MAC FIFO Write Pointer*/ - /* 0x0c08 - 0x0c0b reserved */ +#define RX_MFF_WP 0x0c04 /* 32 bit Receive MAC FIFO Write Pointer */ + /* 0x0c08 - 0x0c0b: reserved */ #define RX_MFF_RP 0x0c0c /* 32 bit Receive MAC FIFO Read Pointer */ #define RX_MFF_PC 0x0c10 /* 32 bit Receive MAC FIFO Packet Cnt */ #define RX_MFF_LEV 0x0c14 /* 32 bit Receive MAC FIFO Level */ #define RX_MFF_CTRL1 0x0c18 /* 16 bit Receive MAC FIFO Control Reg 1*/ #define RX_MFF_STAT_TO 0x0c1a /* 8 bit 
Receive MAC Status Timeout */ -#define RX_MFF_TIST_TO 0x0c1b /* 8 bit Receive MAC Timestamp Timeout */ +#define RX_MFF_TIST_TO 0x0c1b /* 8 bit Receive MAC Time Stamp Timeout */ #define RX_MFF_CTRL2 0x0c1c /* 8 bit Receive MAC FIFO Control Reg 2*/ #define RX_MFF_TST1 0x0c1d /* 8 bit Receive MAC FIFO Test Reg 1 */ #define RX_MFF_TST2 0x0c1e /* 8 bit Receive MAC FIFO Test Reg 2 */ - /* 0x0c1f reserved */ + /* 0x0c1f: reserved */ #define RX_LED_INI 0x0c20 /* 32 bit Receive LED Cnt Init Value */ #define RX_LED_VAL 0x0c24 /* 32 bit Receive LED Cnt Current Value */ #define RX_LED_CTRL 0x0c28 /* 8 bit Receive LED Cnt Control Reg */ #define RX_LED_TST 0x0c29 /* 8 bit Receive LED Cnt Test Register */ - /* 0x0c2a - 0x0c2f reserved */ + /* 0x0c2a - 0x0c2f: reserved */ #define LNK_SYNC_INI 0x0c30 /* 32 bit Link Sync Cnt Init Value */ #define LNK_SYNC_VAL 0x0c34 /* 32 bit Link Sync Cnt Current Value */ -#define LNK_SYNC_CTRL 0x0c38 /* 8 bit Link Sync Cnt Control Register*/ +#define LNK_SYNC_CTRL 0x0c38 /* 8 bit Link Sync Cnt Control Register */ #define LNK_SYNC_TST 0x0c39 /* 8 bit Link Sync Cnt Test Register */ - /* 0x0c3a - 0x0c3b reserved */ + /* 0x0c3a - 0x0c3b: reserved */ #define LNK_LED_REG 0x0c3c /* 8 bit Link LED Register */ - /* 0x0c3d - 0x0c7f reserved */ + /* 0x0c3d - 0x0c3f: reserved */ + +/* Receive GMAC FIFO (YUKON only), use MR_ADDR() to access */ +#define RX_GMF_EA 0x0c40 /* 32 bit Rx GMAC FIFO End Address */ +#define RX_GMF_AF_THR 0x0c44 /* 32 bit Rx GMAC FIFO Almost Full Thresh. */ +#define RX_GMF_CTRL_T 0x0c48 /* 32 bit Rx GMAC FIFO Control/Test */ +#define RX_GMF_FL_MSK 0x0c4c /* 32 bit Rx GMAC FIFO Flush Mask */ +#define RX_GMF_FL_THR 0x0c50 /* 32 bit Rx GMAC FIFO Flush Threshold */ + /* 0x0c54 - 0x0c5f: reserved */ +#define RX_GMF_WP 0x0c60 /* 32 bit Rx GMAC FIFO Write Pointer */ + /* 0x0c64 - 0x0c67: reserved */ +#define RX_GMF_WLEV 0x0c68 /* 32 bit Rx GMAC FIFO Write Level */ + /* 0x0c6c - 0x0c6f: reserved */ +#define RX_GMF_RP 0x0c70 /* 32 bit Rx GMAC FIFO Read Pointer */ + /* 0x0c74 - 0x0c77: reserved */ +#define RX_GMF_RLEV 0x0c78 /* 32 bit Rx GMAC FIFO Read Level */ + /* 0x0c7c - 0x0c7f: reserved */ + +/* + * Bank 25 + */ + /* 0x0c80 - 0x0cbf: MAC 2 */ + /* 0x0cc0 - 0x0cff: reserved */ /* - * Bank 26 - 27 + * Bank 26 + */ +/* + * Transmit MAC FIFO and Transmit LED Registers (GENESIS only), + * use MR_ADDR() to access */ -/* Transmit MAC FIFO and Transmit LED Registers, use MR_ADDR() to address */ #define TX_MFF_EA 0x0d00 /* 32 bit Transmit MAC FIFO End Address */ #define TX_MFF_WP 0x0d04 /* 32 bit Transmit MAC FIFO WR Pointer */ -#define TX_MFF_WSP 0x0d08 /* 32 bit Transmit MAC FIFO WR Shadow Pt*/ +#define TX_MFF_WSP 0x0d08 /* 32 bit Transmit MAC FIFO WR Shadow Ptr */ #define TX_MFF_RP 0x0d0c /* 32 bit Transmit MAC FIFO RD Pointer */ #define TX_MFF_PC 0x0d10 /* 32 bit Transmit MAC FIFO Packet Cnt */ #define TX_MFF_LEV 0x0d14 /* 32 bit Transmit MAC FIFO Level */ #define TX_MFF_CTRL1 0x0d18 /* 16 bit Transmit MAC FIFO Ctrl Reg 1 */ -#define TX_MFF_WAF 0x0d1a /* 8 bit Transmit MAC Wait after flush*/ - /* 0x0c1b reserved */ +#define TX_MFF_WAF 0x0d1a /* 8 bit Transmit MAC Wait after flush */ + /* 0x0c1b: reserved */ #define TX_MFF_CTRL2 0x0d1c /* 8 bit Transmit MAC FIFO Ctrl Reg 2 */ #define TX_MFF_TST1 0x0d1d /* 8 bit Transmit MAC FIFO Test Reg 1 */ #define TX_MFF_TST2 0x0d1e /* 8 bit Transmit MAC FIFO Test Reg 2 */ - /* 0x0d1f reserved */ + /* 0x0d1f: reserved */ #define TX_LED_INI 0x0d20 /* 32 bit Transmit LED Cnt Init Value */ #define TX_LED_VAL 0x0d24 /* 32 bit Transmit LED 
Cnt Current Val */ #define TX_LED_CTRL 0x0d28 /* 8 bit Transmit LED Cnt Control Reg */ -#define TX_LED_TST 0x0d29 /* 8 bit Transmit LED Cnt Test Register*/ - /* 0x0d2a - 0x0d7f reserved */ +#define TX_LED_TST 0x0d29 /* 8 bit Transmit LED Cnt Test Reg */ + /* 0x0d2a - 0x0d3f: reserved */ + +/* Transmit GMAC FIFO (YUKON only), use MR_ADDR() to access */ +#define TX_GMF_EA 0x0d40 /* 32 bit Tx GMAC FIFO End Address */ +#define TX_GMF_AE_THR 0x0d44 /* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ +#define TX_GMF_CTRL_T 0x0d48 /* 32 bit Tx GMAC FIFO Control/Test */ + /* 0x0d4c - 0x0d5f: reserved */ +#define TX_GMF_WP 0x0d60 /* 32 bit Tx GMAC FIFO Write Pointer */ +#define TX_GMF_WSP 0x0d64 /* 32 bit Tx GMAC FIFO Write Shadow Ptr. */ +#define TX_GMF_WLEV 0x0d68 /* 32 bit Tx GMAC FIFO Write Level */ + /* 0x0d6c - 0x0d6f: reserved */ +#define TX_GMF_RP 0x0d70 /* 32 bit Tx GMAC FIFO Read Pointer */ +#define TX_GMF_RSTP 0x0d74 /* 32 bit Tx GMAC FIFO Restart Pointer */ +#define TX_GMF_RLEV 0x0d78 /* 32 bit Tx GMAC FIFO Read Level */ + /* 0x0d7c - 0x0d7f: reserved */ + +/* + * Bank 27 + */ + /* 0x0d80 - 0x0dbf: MAC 2 */ + /* 0x0daa - 0x0dff: reserved */ /* * Bank 28 */ /* Descriptor Poll Timer Registers */ -#define B28_DPT_INI 0x0e00 /* 32 bit Descriptor Poll Timer Init Val*/ -#define B28_DPT_VAL 0x0e04 /* 32 bit Descriptor Poll Timer Curr Val*/ -#define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg*/ - /* 0x0e09: reserved */ -#define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg*/ - /* 0x0e0b - 0x0e8f: reserved */ +#define B28_DPT_INI 0x0e00 /* 24 bit Descriptor Poll Timer Init Val */ +#define B28_DPT_VAL 0x0e04 /* 24 bit Descriptor Poll Timer Curr Val */ +#define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg */ + /* 0x0e09: reserved */ +#define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg */ + /* 0x0e0b: reserved */ + +/* Time Stamp Timer Registers (YUKON only) */ + /* 0x0e10: reserved */ +#define GMAC_TI_ST_VAL 0x0e14 /* 32 bit Time Stamp Timer Curr Val */ +#define GMAC_TI_ST_CTRL 0x0e18 /* 8 bit Time Stamp Timer Ctrl Reg */ + /* 0x0e19: reserved */ +#define GMAC_TI_ST_TST 0x0e1a /* 8 bit Time Stamp Timer Test Reg */ + /* 0x0e1b - 0x0e7f: reserved */ + +/* + * Bank 29 + */ + /* 0x0e80 - 0x0efc: reserved */ + +/* + * Bank 30 + */ +/* GMAC and GPHY Control Registers (YUKON only) */ +#define GMAC_CTRL 0x0f00 /* 32 bit GMAC Control Reg */ +#define GPHY_CTRL 0x0f04 /* 32 bit GPHY Control Reg */ +#define GMAC_IRQ_SRC 0x0f08 /* 8 bit GMAC Interrupt Source Reg */ + /* 0x0f09 - 0x0f0b: reserved */ +#define GMAC_IRQ_MSK 0x0f0c /* 8 bit GMAC Interrupt Mask Reg */ + /* 0x0f0d - 0x0f0f: reserved */ +#define GMAC_LINK_CTRL 0x0f10 /* 16 bit Link Control Reg */ + /* 0x0f14 - 0x0f1f: reserved */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + +#define WOL_REG_OFFS 0x20 /* HW-Bug: Address is + 0x20 against spec. 
*/ + +#define WOL_CTRL_STAT 0x0f20 /* 16 bit WOL Control/Status Reg */ +#define WOL_MATCH_CTL 0x0f22 /* 8 bit WOL Match Control Reg */ +#define WOL_MATCH_RES 0x0f23 /* 8 bit WOL Match Result Reg */ +#define WOL_MAC_ADDR_LO 0x0f24 /* 32 bit WOL MAC Address Low */ +#define WOL_MAC_ADDR_HI 0x0f28 /* 16 bit WOL MAC Address High */ +#define WOL_PATT_RPTR 0x0f2c /* 8 bit WOL Pattern Read Ptr */ + +/* use this macro to access above registers */ +#define WOL_REG(Reg) ((Reg) + (pAC->GIni.GIWolOffs)) + + +/* WOL Pattern Length Registers (YUKON only) */ + +#define WOL_PATT_LEN_LO 0x0f30 /* 32 bit WOL Pattern Length 3..0 */ +#define WOL_PATT_LEN_HI 0x0f34 /* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + +#define WOL_PATT_CNT_0 0x0f38 /* 32 bit WOL Pattern Counter 3..0 */ +#define WOL_PATT_CNT_4 0x0f3c /* 24 bit WOL Pattern Counter 6..4 */ + /* 0x0f40 - 0x0f7f: reserved */ + +/* + * Bank 31 + */ +/* 0x0f80 - 0x0fff: reserved */ /* - * Bank 29 - 31 + * Bank 32 - 33 */ -/* 0x0e90 - 0x0fff: reserved */ +#define WOL_PATT_RAM_1 0x1000 /* WOL Pattern RAM Link 1 */ /* - * Bank 0x20 - 0x3f + * Bank 0x22 - 0x3f */ -/* 0x1000 - 0x1fff: reserved */ +/* 0x1100 - 0x1fff: reserved */ /* * Bank 0x40 - 0x4f */ -/* XMAC 1 registers */ -#define B40_XMAC1 0x2000 +#define BASE_XMAC_1 0x2000 /* XMAC 1 registers */ /* * Bank 0x50 - 0x5f */ -/* 0x2800 - 0x2fff: reserved */ + +#define BASE_GMAC_1 0x2800 /* GMAC 1 registers */ /* * Bank 0x60 - 0x6f */ -/* XMAC 2 registers */ -#define B40_XMAC2 0x3000 +#define BASE_XMAC_2 0x3000 /* XMAC 2 registers */ /* * Bank 0x70 - 0x7f */ -/* 0x3800 - 0x3fff: reserved */ +#define BASE_GMAC_2 0x3800 /* GMAC 2 registers */ /* * Control Register Bit Definitions: */ /* B0_RAP 8 bit Register Address Port */ - /* Bit 7: reserved */ -#define RAP_RAP 0x3f /* Bit 6..0: 0 = block 0, .., 6f = block 6f*/ + /* Bit 7: reserved */ +#define RAP_RAP 0x3f /* Bit 6..0: 0 = block 0,..,6f = block 6f */ /* B0_CTST 16 bit Control/Status register */ - /* Bit 15..10: reserved */ -#define CS_BUS_CLOCK (1<<9) /* Bit 9: Bus Clock 0/1 = 33/66MHz */ -#define CS_BUS_SLOT_SZ (1<<8) /* Bit 8: Slot Size 0/1 = 32/64 bit slot*/ -#define CS_ST_SW_IRQ (1<<7) /* Bit 7: Set IRQ SW Request */ -#define CS_CL_SW_IRQ (1<<6) /* Bit 6: Clear IRQ SW Request */ -#define CS_STOP_DONE (1<<5) /* Bit 5: Stop Master is finished */ -#define CS_STOP_MAST (1<<4) /* Bit 4: Command Bit to stop the master*/ -#define CS_MRST_CLR (1<<3) /* Bit 3: Clear Master reset */ -#define CS_MRST_SET (1<<2) /* Bit 2: Set Master reset */ -#define CS_RST_CLR (1<<1) /* Bit 1: Clear Software reset */ -#define CS_RST_SET (1<<0) /* Bit 0: Set Software reset */ + /* Bit 15..11: reserved */ +#define CS_VAUX_AVAIL BIT_10S /* VAUX available (YUKON only) */ +#define CS_BUS_CLOCK BIT_9S /* Bus Clock 0/1 = 33/66 MHz */ +#define CS_BUS_SLOT_SZ BIT_8S /* Slot Size 0/1 = 32/64 bit slot */ +#define CS_ST_SW_IRQ BIT_7S /* Set IRQ SW Request */ +#define CS_CL_SW_IRQ BIT_6S /* Clear IRQ SW Request */ +#define CS_STOP_DONE BIT_5S /* Stop Master is finished */ +#define CS_STOP_MAST BIT_4S /* Command Bit to stop the master */ +#define CS_MRST_CLR BIT_3S /* Clear Master reset */ +#define CS_MRST_SET BIT_2S /* Set Master reset */ +#define CS_RST_CLR BIT_1S /* Clear Software reset */ +#define CS_RST_SET BIT_0S /* Set Software reset */ /* B0_LED 8 Bit LED register */ - /* Bit 7..2: reserved */ -#define LED_STAT_ON (1<<1) /* Bit 1: Status LED on */ -#define LED_STAT_OFF (1<<0) /* Bit 0: Status LED off */ + /* Bit 7.. 
2: reserved */ +#define LED_STAT_ON BIT_1S /* Status LED on */ +#define LED_STAT_OFF BIT_0S /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ +#define PC_VAUX_ENA BIT_7 /* Switch VAUX Enable */ +#define PC_VAUX_DIS BIT_6 /* Switch VAUX Disable */ +#define PC_VCC_ENA BIT_5 /* Switch VCC Enable */ +#define PC_VCC_DIS BIT_4 /* Switch VCC Disable */ +#define PC_VAUX_ON BIT_3 /* Switch VAUX On */ +#define PC_VAUX_OFF BIT_2 /* Switch VAUX Off */ +#define PC_VCC_ON BIT_1 /* Switch VCC On */ +#define PC_VCC_OFF BIT_0 /* Switch VCC Off */ /* B0_ISRC 32 bit Interrupt Source Register */ /* B0_IMSK 32 bit Interrupt Mask Register */ /* B0_SP_ISRC 32 bit Special Interrupt Source Reg */ /* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ #define IS_ALL_MSK 0xbfffffffL /* All Interrupt bits */ -#define IS_HW_ERR (1UL<<31) /* Bit 31: Interrupt HW Error */ - /* Bit 30: reserved */ -#define IS_PA_TO_RX1 (1L<<29) /* Bit 29: Packet Arb Timeout Rx1*/ -#define IS_PA_TO_RX2 (1L<<28) /* Bit 28: Packet Arb Timeout Rx2*/ -#define IS_PA_TO_TX1 (1L<<27) /* Bit 27: Packet Arb Timeout Tx1*/ -#define IS_PA_TO_TX2 (1L<<26) /* Bit 26: Packet Arb Timeout Tx2*/ -#define IS_I2C_READY (1L<<25) /* Bit 25: IRQ on end of I2C tx */ -#define IS_IRQ_SW (1L<<24) /* Bit 24: SW forced IRQ */ -#define IS_EXT_REG (1L<<23) /* Bit 23: IRQ from external reg */ -#define IS_TIMINT (1L<<22) /* Bit 22: IRQ from Timer */ -#define IS_MAC1 (1L<<21) /* Bit 21: IRQ from MAC 1 */ -#define IS_LNK_SYNC_M1 (1L<<20) /* Bit 20: Link Sync Cnt wrap M1 */ -#define IS_MAC2 (1L<<19) /* Bit 19: IRQ from MAC 2 */ -#define IS_LNK_SYNC_M2 (1L<<18) /* Bit 18: Link Sync Cnt wrap M2 */ +#define IS_HW_ERR BIT_31 /* Interrupt HW Error */ + /* Bit 30: reserved */ +#define IS_PA_TO_RX1 BIT_29 /* Packet Arb Timeout Rx1 */ +#define IS_PA_TO_RX2 BIT_28 /* Packet Arb Timeout Rx2 */ +#define IS_PA_TO_TX1 BIT_27 /* Packet Arb Timeout Tx1 */ +#define IS_PA_TO_TX2 BIT_26 /* Packet Arb Timeout Tx2 */ +#define IS_I2C_READY BIT_25 /* IRQ on end of I2C Tx */ +#define IS_IRQ_SW BIT_24 /* SW forced IRQ */ +#define IS_EXT_REG BIT_23 /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ +#define IS_TIMINT BIT_22 /* IRQ from Timer */ +#define IS_MAC1 BIT_21 /* IRQ from MAC 1 */ +#define IS_LNK_SYNC_M1 BIT_20 /* Link Sync Cnt wrap MAC 1 */ +#define IS_MAC2 BIT_19 /* IRQ from MAC 2 */ +#define IS_LNK_SYNC_M2 BIT_18 /* Link Sync Cnt wrap MAC 2 */ /* Receive Queue 1 */ -#define IS_R1_B (1L<<17) /* Bit 17: Q_R1 End of Buffer */ -#define IS_R1_F (1L<<16) /* Bit 16: Q_R1 End of Frame */ -#define IS_R1_C (1L<<15) /* Bit 15: Q_R1 Encoding Error */ +#define IS_R1_B BIT_17 /* Q_R1 End of Buffer */ +#define IS_R1_F BIT_16 /* Q_R1 End of Frame */ +#define IS_R1_C BIT_15 /* Q_R1 Encoding Error */ /* Receive Queue 2 */ -#define IS_R2_B (1L<<14) /* Bit 14: Q_R2 End of Buffer */ -#define IS_R2_F (1L<<13) /* Bit 13: Q_R2 End of Frame */ -#define IS_R2_C (1L<<12) /* Bit 12: Q_R2 Encoding Error */ +#define IS_R2_B BIT_14 /* Q_R2 End of Buffer */ +#define IS_R2_F BIT_13 /* Q_R2 End of Frame */ +#define IS_R2_C BIT_12 /* Q_R2 Encoding Error */ /* Synchronous Transmit Queue 1 */ -#define IS_XS1_B (1L<<11) /* Bit 11: Q_XS1 End of Buffer */ -#define IS_XS1_F (1L<<10) /* Bit 10: Q_XS1 End of Frame */ -#define IS_XS1_C (1L<<9) /* Bit 9: Q_XS1 Encoding Error */ +#define IS_XS1_B BIT_11 /* Q_XS1 End of Buffer */ +#define IS_XS1_F BIT_10 /* Q_XS1 End of Frame */ +#define IS_XS1_C BIT_9 /* Q_XS1 Encoding Error */ /* Asynchronous Transmit Queue 1 */ -#define IS_XA1_B 
(1L<<8) /* Bit 8: Q_XA1 End of Buffer */ -#define IS_XA1_F (1L<<7) /* Bit 7: Q_XA1 End of Frame */ -#define IS_XA1_C (1L<<6) /* Bit 6: Q_XA1 Encoding Error */ +#define IS_XA1_B BIT_8 /* Q_XA1 End of Buffer */ +#define IS_XA1_F BIT_7 /* Q_XA1 End of Frame */ +#define IS_XA1_C BIT_6 /* Q_XA1 Encoding Error */ /* Synchronous Transmit Queue 2 */ -#define IS_XS2_B (1L<<5) /* Bit 5: Q_XS2 End of Buffer */ -#define IS_XS2_F (1L<<4) /* Bit 4: Q_XS2 End of Frame */ -#define IS_XS2_C (1L<<3) /* Bit 3: Q_XS2 Encoding Error */ +#define IS_XS2_B BIT_5 /* Q_XS2 End of Buffer */ +#define IS_XS2_F BIT_4 /* Q_XS2 End of Frame */ +#define IS_XS2_C BIT_3 /* Q_XS2 Encoding Error */ /* Asynchronous Transmit Queue 2 */ -#define IS_XA2_B (1L<<2) /* Bit 2: Q_XA2 End of Buffer */ -#define IS_XA2_F (1L<<1) /* Bit 1: Q_XA2 End of Frame */ -#define IS_XA2_C (1L<<0) /* Bit 0: Q_XA2 Encoding Error */ +#define IS_XA2_B BIT_2 /* Q_XA2 End of Buffer */ +#define IS_XA2_F BIT_1 /* Q_XA2 End of Frame */ +#define IS_XA2_C BIT_0 /* Q_XA2 Encoding Error */ /* B0_HWE_ISRC 32 bit HW Error Interrupt Src Reg */ /* B0_HWE_IMSK 32 bit HW Error Interrupt Mask Reg */ /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ #define IS_ERR_MSK 0x00000fffL /* All Error bits */ - /* Bit 31..12: reserved */ -#define IS_IRQ_MST_ERR (1L<<11) /* Bit 11: IRQ master error */ - /* PERR,RMABORT,RTABORT,DATAPERR */ -#define IS_IRQ_STAT (1L<<10) /* Bit 10: IRQ status execption */ - /* RMABORT, RTABORT, DATAPERR */ -#define IS_NO_STAT_M1 (1L<<9) /* Bit 9: No Rx Status from MAC1*/ -#define IS_NO_STAT_M2 (1L<<8) /* Bit 8: No Rx Status from MAC2*/ -#define IS_NO_TIST_M1 (1L<<7) /* Bit 7: No Timestamp from MAC1*/ -#define IS_NO_TIST_M2 (1L<<6) /* Bit 6: No Timestamp from MAC2*/ -#define IS_RAM_RD_PAR (1L<<5) /* Bit 5: RAM Read Parity Error */ -#define IS_RAM_WR_PAR (1L<<4) /* Bit 4: RAM Write Parity Error*/ -#define IS_M1_PAR_ERR (1L<<3) /* Bit 3: MAC 1 Parity Error */ -#define IS_M2_PAR_ERR (1L<<2) /* Bit 2: MAC 2 Parity Error */ -#define IS_R1_PAR_ERR (1L<<1) /* Bit 1: Queue R1 Parity Error */ -#define IS_R2_PAR_ERR (1L<<0) /* Bit 0: Queue R2 Parity Error */ + /* Bit 31..14: reserved */ +#define IS_IRQ_TIST_OV BIT_13 /* Time Stamp Timer Overflow (YUKON only) */ +#define IS_IRQ_SENSOR BIT_12 /* IRQ from Sensor (YUKON only) */ +#define IS_IRQ_MST_ERR BIT_11 /* IRQ master error detected */ +#define IS_IRQ_STAT BIT_10 /* IRQ status exception */ +#define IS_NO_STAT_M1 BIT_9 /* No Rx Status from MAC 1 */ +#define IS_NO_STAT_M2 BIT_8 /* No Rx Status from MAC 2 */ +#define IS_NO_TIST_M1 BIT_7 /* No Time Stamp from MAC 1 */ +#define IS_NO_TIST_M2 BIT_6 /* No Time Stamp from MAC 2 */ +#define IS_RAM_RD_PAR BIT_5 /* RAM Read Parity Error */ +#define IS_RAM_WR_PAR BIT_4 /* RAM Write Parity Error */ +#define IS_M1_PAR_ERR BIT_3 /* MAC 1 Parity Error */ +#define IS_M2_PAR_ERR BIT_2 /* MAC 2 Parity Error */ +#define IS_R1_PAR_ERR BIT_1 /* Queue R1 Parity Error */ +#define IS_R2_PAR_ERR BIT_0 /* Queue R2 Parity Error */ /* B2_CONN_TYP 8 bit Connector type */ /* B2_PMD_TYP 8 bit PMD type */ /* Values of connector and PMD type comply to SysKonnect internal std */ -/* B2_MAC_CFG 8 bit MAC Configuration */ - /* Bit 7..2: reserved */ -#define CFG_DIS_M2_CLK (1<<1) /* Bit 1: Disable Clock for 2nd MAC */ -#define CFG_SNG_MAC (1<<0) /* Bit 0: MAC Config: 1=2 MACs / 0=1 MAC*/ - -/* B2_CHIP_REV 8 bit Queen Chip Revision Number */ -#define FIRST_CHIP_REV 0x0a /* Initial Revision Value */ +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +#define CFG_CHIP_R_MSK (0xf<<4) 
/* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ +#define CFG_DIS_M2_CLK BIT_1S /* Disable Clock for 2nd MAC */ +#define CFG_SNG_MAC BIT_0S /* MAC Config: 0=2 MACs / 1=1 MAC*/ + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +#define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */ +#define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */ /* B2_FAR 32 bit Flash-Prom Addr Reg/Cnt */ -#define FAR_ADDR 0x1ffffL /* Bit 16..0: FPROM Address mask */ +#define FAR_ADDR 0x1ffffL /* Bit 16.. 0: FPROM Address mask */ /* B2_LD_CRTL 8 bit EPROM loader control register */ /* Bits are currently reserved */ /* B2_LD_TEST 8 bit EPROM loader test register */ - /* Bit 7..4: reserved */ -#define LD_T_ON (1<<3) /* Bit 3: Loader Testmode on */ -#define LD_T_OFF (1<<2) /* Bit 2: Loader Testmode off */ -#define LD_T_STEP (1<<1) /* Bit 1: Decrement FPROM addr. Counter */ -#define LD_START (1<<0) /* Bit 0: Start loading FPROM */ + /* Bit 7.. 4: reserved */ +#define LD_T_ON BIT_3S /* Loader Test mode on */ +#define LD_T_OFF BIT_2S /* Loader Test mode off */ +#define LD_T_STEP BIT_1S /* Decrement FPROM addr. Counter */ +#define LD_START BIT_0S /* Start loading FPROM */ /* * Timer Section */ /* B2_TI_CRTL 8 bit Timer control */ /* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ - /* Bit 7..3: reserved */ -#define TIM_START (1<<2) /* Bit 2: Start Timer */ -#define TIM_STOP (1<<1) /* Bit 1: Stop Timer */ -#define TIM_CLR_IRQ (1<<0) /* Bit 0: Clear Timer IRQ, (!IRQM) */ + /* Bit 7.. 3: reserved */ +#define TIM_START BIT_2S /* Start Timer */ +#define TIM_STOP BIT_1S /* Stop Timer */ +#define TIM_CLR_IRQ BIT_0S /* Clear Timer IRQ (!IRQM) */ /* B2_TI_TEST 8 Bit Timer Test */ /* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ /* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ - /* Bit 7..3: reserved */ -#define TIM_T_ON (1<<2) /* Bit 2: Test mode on */ -#define TIM_T_OFF (1<<1) /* Bit 1: Test mode off */ -#define TIM_T_STEP (1<<0) /* Bit 0: Test step */ + /* Bit 7.. 3: reserved */ +#define TIM_T_ON BIT_2S /* Test mode on */ +#define TIM_T_OFF BIT_1S /* Test mode off */ +#define TIM_T_STEP BIT_0S /* Test step */ /* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */ /* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */ @@ -874,100 +1141,101 @@ #define DPT_MSK 0x00ffffffL /* Bit 23.. 0: Desc Poll Timer Bits */ /* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ - /* Bit 7..2: reserved */ -#define DPT_START (1<<1) /* Bit 1: Start Desciptor Poll Timer */ -#define DPT_STOP (1<<0) /* Bit 0: Stop Desciptor Poll Timer */ + /* Bit 7.. 
2: reserved */ +#define DPT_START BIT_1S /* Start Descriptor Poll Timer */ +#define DPT_STOP BIT_0S /* Stop Descriptor Poll Timer */ +/* B2_E_3 8 bit lower 4 bits used for HW self test result */ +#define B2_E3_RES_MASK 0x0f /* B2_TST_CTRL1 8 bit Test Control Register 1 */ -#define TST_FRC_DPERR_MR (1<<7) /* Bit 7: force DATAPERR on MST RD */ -#define TST_FRC_DPERR_MW (1<<6) /* Bit 6: force DATAPERR on MST WR */ -#define TST_FRC_DPERR_TR (1<<5) /* Bit 5: force DATAPERR on TRG RD */ -#define TST_FRC_DPERR_TW (1<<4) /* Bit 4: force DATAPERR on TRG WR */ -#define TST_FRC_APERR_M (1<<3) /* Bit 3: force ADDRPERR on MST */ -#define TST_FRC_APERR_T (1<<2) /* Bit 2: force ADDRPERR on TRG */ -#define TST_CFG_WRITE_ON (1<<1) /* Bit 1: Enable Config Reg WR */ -#define TST_CFG_WRITE_OFF (1<<0) /* Bit 0: Disable Config Reg WR */ +#define TST_FRC_DPERR_MR BIT_7S /* force DATAPERR on MST RD */ +#define TST_FRC_DPERR_MW BIT_6S /* force DATAPERR on MST WR */ +#define TST_FRC_DPERR_TR BIT_5S /* force DATAPERR on TRG RD */ +#define TST_FRC_DPERR_TW BIT_4S /* force DATAPERR on TRG WR */ +#define TST_FRC_APERR_M BIT_3S /* force ADDRPERR on MST */ +#define TST_FRC_APERR_T BIT_2S /* force ADDRPERR on TRG */ +#define TST_CFG_WRITE_ON BIT_1S /* Enable Config Reg WR */ +#define TST_CFG_WRITE_OFF BIT_0S /* Disable Config Reg WR */ /* B2_TST_CTRL2 8 bit Test Control Register 2 */ - /* Bit 7..4: reserved */ - /* force the following error on */ - /* the next master read/write */ -#define TST_FRC_DPERR_MR64 (1<<3) /* Bit 3: DataPERR RD 64 */ -#define TST_FRC_DPERR_MW64 (1<<2) /* Bit 2: DataPERR WR 64 */ -#define TST_FRC_APERR_1M64 (1<<1) /* Bit 1: AddrPERR on 1. phase */ -#define TST_FRC_APERR_2M64 (1<<0) /* Bit 0: AddrPERR on 2. phase */ - -/* B2_GP_IO 32 bit General Purpose IO Register */ - /* Bit 31..26: reserved */ -#define GP_DIR_9 (1L<<25) /* Bit 25: IO_9 direct, 0=I/1=O */ -#define GP_DIR_8 (1L<<24) /* Bit 24: IO_8 direct, 0=I/1=O */ -#define GP_DIR_7 (1L<<23) /* Bit 23: IO_7 direct, 0=I/1=O */ -#define GP_DIR_6 (1L<<22) /* Bit 22: IO_6 direct, 0=I/1=O */ -#define GP_DIR_5 (1L<<21) /* Bit 21: IO_5 direct, 0=I/1=O */ -#define GP_DIR_4 (1L<<20) /* Bit 20: IO_4 direct, 0=I/1=O */ -#define GP_DIR_3 (1L<<19) /* Bit 19: IO_3 direct, 0=I/1=O */ -#define GP_DIR_2 (1L<<18) /* Bit 18: IO_2 direct, 0=I/1=O */ -#define GP_DIR_1 (1L<<17) /* Bit 17: IO_1 direct, 0=I/1=O */ -#define GP_DIR_0 (1L<<16) /* Bit 16: IO_0 direct, 0=I/1=O */ - /* Bit 15..10: reserved */ -#define GP_IO_9 (1L<<9) /* Bit 9: IO_9 pin */ -#define GP_IO_8 (1L<<8) /* Bit 8: IO_8 pin */ -#define GP_IO_7 (1L<<7) /* Bit 7: IO_7 pin */ -#define GP_IO_6 (1L<<6) /* Bit 6: IO_6 pin */ -#define GP_IO_5 (1L<<5) /* Bit 5: IO_5 pin */ -#define GP_IO_4 (1L<<4) /* Bit 4: IO_4 pin */ -#define GP_IO_3 (1L<<3) /* Bit 3: IO_3 pin */ -#define GP_IO_2 (1L<<2) /* Bit 2: IO_2 pin */ -#define GP_IO_1 (1L<<1) /* Bit 1: IO_1 pin */ -#define GP_IO_0 (1L<<0) /* Bit 0: IO_0 pin */ + /* Bit 7.. 4: reserved */ + /* force the following error on the next master read/write */ +#define TST_FRC_DPERR_MR64 BIT_3S /* DataPERR RD 64 */ +#define TST_FRC_DPERR_MW64 BIT_2S /* DataPERR WR 64 */ +#define TST_FRC_APERR_1M64 BIT_1S /* AddrPERR on 1. phase */ +#define TST_FRC_APERR_2M64 BIT_0S /* AddrPERR on 2. 
phase */ + +/* B2_GP_IO 32 bit General Purpose I/O Register */ + /* Bit 31..26: reserved */ +#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=I/1=O */ +#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=I/1=O */ +#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=I/1=O */ +#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=I/1=O */ +#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=I/1=O */ +#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=I/1=O */ +#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=I/1=O */ +#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=I/1=O */ +#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=I/1=O */ +#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=I/1=O */ + /* Bit 15..10: reserved */ +#define GP_IO_9 BIT_9 /* IO_9 pin */ +#define GP_IO_8 BIT_8 /* IO_8 pin */ +#define GP_IO_7 BIT_7 /* IO_7 pin */ +#define GP_IO_6 BIT_6 /* IO_6 pin */ +#define GP_IO_5 BIT_5 /* IO_5 pin */ +#define GP_IO_4 BIT_4 /* IO_4 pin */ +#define GP_IO_3 BIT_3 /* IO_3 pin */ +#define GP_IO_2 BIT_2 /* IO_2 pin */ +#define GP_IO_1 BIT_1 /* IO_1 pin */ +#define GP_IO_0 BIT_0 /* IO_0 pin */ /* B2_I2C_CTRL 32 bit I2C HW Control Register */ -#define I2C_FLAG (1UL<<31) /* Bit 31: Start read/write if WR*/ -#define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be RD/WR */ -#define I2C_DEV_SEL (0x7fL<<9) /* Bit 15.. 9: I2C Device Select */ - /* Bit 8.. 5: reserved */ -#define I2C_BURST_LEN (1L<<4) /* Bit 4: Burst Len, 1/4 bytes */ +#define I2C_FLAG BIT_31 /* Start read/write if WR */ +#define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be RD/WR */ +#define I2C_DEV_SEL (0x7fL<<9) /* Bit 15.. 9: I2C Device Select */ + /* Bit 8.. 5: reserved */ +#define I2C_BURST_LEN BIT_4 /* Burst Len, 1/4 bytes */ #define I2C_DEV_SIZE (7L<<1) /* Bit 3.. 1: I2C Device Size */ #define I2C_025K_DEV (0L<<1) /* 0: 256 Bytes or smal. */ #define I2C_05K_DEV (1L<<1) /* 1: 512 Bytes */ -#define I2C_1K_DEV (2L<<1) /* 2: 1024 Bytes */ +#define I2C_1K_DEV (2L<<1) /* 2: 1024 Bytes */ #define I2C_2K_DEV (3L<<1) /* 3: 2048 Bytes */ -#define I2C_4K_DEV (4L<<1) /* 4: 4096 Bytes */ -#define I2C_8K_DEV (5L<<1) /* 5: 8192 Bytes */ -#define I2C_16K_DEV (6L<<1) /* 6: 16384 Bytes */ -#define I2C_32K_DEV (7L<<1) /* 7: 32768 Bytes */ -#define I2C_STOP (1L<<0) /* Bit 0: Interrupt I2C transfer*/ +#define I2C_4K_DEV (4L<<1) /* 4: 4096 Bytes */ +#define I2C_8K_DEV (5L<<1) /* 5: 8192 Bytes */ +#define I2C_16K_DEV (6L<<1) /* 6: 16384 Bytes */ +#define I2C_32K_DEV (7L<<1) /* 7: 32768 Bytes */ +#define I2C_STOP BIT_0 /* Interrupt I2C transfer */ /* B2_I2C_IRQ 32 bit I2C HW IRQ Register */ - /* Bit 31..1 reserved */ -#define I2C_CLR_IRQ (1<<0) /* Bit 0: Clear I2C IRQ */ + /* Bit 31.. 1 reserved */ +#define I2C_CLR_IRQ BIT_0 /* Clear I2C IRQ */ -/* B2_I2C_SW 32 bit I2C HW SW Port Register */ - /* Bit 7..3: reserved */ -#define I2C_DATA_DIR (1<<2) /* Bit 2: direction of I2C_DATA */ -#define I2C_DATA (1<<1) /* Bit 1: I2C Data Port */ -#define I2C_CLK (1<<0) /* Bit 0: I2C Clock Port */ +/* B2_I2C_SW 32 bit (8 bit access) I2C HW SW Port Register */ + /* Bit 7.. 3: reserved */ +#define I2C_DATA_DIR BIT_2S /* direction of I2C_DATA */ +#define I2C_DATA BIT_1S /* I2C Data Port */ +#define I2C_CLK BIT_0S /* I2C Clock Port */ /* * I2C Address */ -#define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address, (Volt and Temp)*/ +#define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address, (Volt and Temp)*/ /* B2_BSC_CTRL 8 bit Blink Source Counter Control */ - /* Bit 7..2: reserved */ -#define BSC_START (1<<1) /* Bit 1: Start Blink Source Counter */ -#define BSC_STOP (1<<0) /* Bit 0: Stop Blink Source Counter */ + /* Bit 7.. 
2: reserved */ +#define BSC_START BIT_1S /* Start Blink Source Counter */ +#define BSC_STOP BIT_0S /* Stop Blink Source Counter */ /* B2_BSC_STAT 8 bit Blink Source Counter Status */ - /* Bit 7..1: reserved */ -#define BSC_SRC (1<<0) /* Bit 0: Blink Source, 0=Off / 1=On */ + /* Bit 7.. 1: reserved */ +#define BSC_SRC BIT_0S /* Blink Source, 0=Off / 1=On */ /* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ -#define BSC_T_ON (1<<2) /* Bit 2: Test mode on */ -#define BSC_T_OFF (1<<1) /* Bit 1: Test mode off */ -#define BSC_T_STEP (1<<0) /* Bit 0: Test step */ +#define BSC_T_ON BIT_2S /* Test mode on */ +#define BSC_T_OFF BIT_1S /* Test mode off */ +#define BSC_T_STEP BIT_0S /* Test step */ /* B3_RAM_ADDR 32 bit RAM Address, to read or write */ @@ -977,55 +1245,55 @@ /* RAM Interface Registers */ /* B3_RI_CTRL 16 bit RAM Iface Control Register */ /* Bit 15..10: reserved */ -#define RI_CLR_RD_PERR (1<<9) /* Bit 9: Clear IRQ RAM Read Parity Err */ -#define RI_CLR_WR_PERR (1<<8) /* Bit 8: Clear IRQ RAM Write Parity Err*/ - /* Bit 7..2: reserved */ -#define RI_RST_CLR (1<<1) /* Bit 1: Clear RAM Interface Reset */ -#define RI_RST_SET (1<<0) /* Bit 0: Set RAM Interface Reset */ +#define RI_CLR_RD_PERR BIT_9S /* Clear IRQ RAM Read Parity Err */ +#define RI_CLR_WR_PERR BIT_8S /* Clear IRQ RAM Write Parity Err*/ + /* Bit 7.. 2: reserved */ +#define RI_RST_CLR BIT_1S /* Clear RAM Interface Reset */ +#define RI_RST_SET BIT_0S /* Set RAM Interface Reset */ /* B3_RI_TEST 8 bit RAM Iface Test Register */ - /* Bit 15..4: reserved */ -#define RI_T_EV (1<<3) /* Bit 3: Timeout Event occured */ -#define RI_T_ON (1<<2) /* Bit 2: Timeout Timer Test On */ -#define RI_T_OFF (1<<1) /* Bit 1: Timeout Timer Test Off */ -#define RI_T_STEP (1<<0) /* Bit 0: Timeout Timer Step */ + /* Bit 15.. 4: reserved */ +#define RI_T_EV BIT_3S /* Timeout Event occurred */ +#define RI_T_ON BIT_2S /* Timeout Timer Test On */ +#define RI_T_OFF BIT_1S /* Timeout Timer Test Off */ +#define RI_T_STEP BIT_0S /* Timeout Timer Step */ /* MAC Arbiter Registers */ /* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ - /* Bit 15..4: reserved */ -#define MA_FOE_ON (1<<3) /* Bit 3: XMAC Fast Output Enable ON */ -#define MA_FOE_OFF (1<<2) /* Bit 2: XMAC Fast Output Enable OFF */ -#define MA_RST_CLR (1<<1) /* Bit 1: Clear MAC Arbiter Reset */ -#define MA_RST_SET (1<<0) /* Bit 0: Set MAC Arbiter Reset */ + /* Bit 15.. 4: reserved */ +#define MA_FOE_ON BIT_3S /* XMAC Fast Output Enable ON */ +#define MA_FOE_OFF BIT_2S /* XMAC Fast Output Enable OFF */ +#define MA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */ +#define MA_RST_SET BIT_0S /* Set MAC Arbiter Reset */ /* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */ - /* Bit 15..8: reserved */ -#define MA_ENA_REC_TX2 (1<<7) /* Bit 7: Enable Recovery Timer TX2 */ -#define MA_DIS_REC_TX2 (1<<6) /* Bit 6: Disable Recovery Timer TX2 */ -#define MA_ENA_REC_TX1 (1<<5) /* Bit 5: Enable Recovery Timer TX1 */ -#define MA_DIS_REC_TX1 (1<<4) /* Bit 4: Disable Recovery Timer TX1 */ -#define MA_ENA_REC_RX2 (1<<3) /* Bit 3: Enable Recovery Timer RX2 */ -#define MA_DIS_REC_RX2 (1<<2) /* Bit 2: Disable Recovery Timer RX2 */ -#define MA_ENA_REC_RX1 (1<<1) /* Bit 1: Enable Recovery Timer RX1 */ -#define MA_DIS_REC_RX1 (1<<0) /* Bit 0: Disable Recovery Timer RX1 */ + /* Bit 15.. 
8: reserved */ +#define MA_ENA_REC_TX2 BIT_7S /* Enable Recovery Timer TX2 */ +#define MA_DIS_REC_TX2 BIT_6S /* Disable Recovery Timer TX2 */ +#define MA_ENA_REC_TX1 BIT_5S /* Enable Recovery Timer TX1 */ +#define MA_DIS_REC_TX1 BIT_4S /* Disable Recovery Timer TX1 */ +#define MA_ENA_REC_RX2 BIT_3S /* Enable Recovery Timer RX2 */ +#define MA_DIS_REC_RX2 BIT_2S /* Disable Recovery Timer RX2 */ +#define MA_ENA_REC_RX1 BIT_1S /* Enable Recovery Timer RX1 */ +#define MA_DIS_REC_RX1 BIT_0S /* Disable Recovery Timer RX1 */ /* Packet Arbiter Registers */ /* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ /* Bit 15..14: reserved */ -#define PA_CLR_TO_TX2 (1<<13) /* Bit 13: Clear IRQ Packet Timeout TX2 */ -#define PA_CLR_TO_TX1 (1<<12) /* Bit 12: Clear IRQ Packet Timeout TX1 */ -#define PA_CLR_TO_RX2 (1<<11) /* Bit 11: Clear IRQ Packet Timeout RX2 */ -#define PA_CLR_TO_RX1 (1<<10) /* Bit 10: Clear IRQ Packet Timeout RX1 */ -#define PA_ENA_TO_TX2 (1<<9) /* Bit 9: Enable Timeout Timer TX2 */ -#define PA_DIS_TO_TX2 (1<<8) /* Bit 8: Disable Timeout Timer TX2 */ -#define PA_ENA_TO_TX1 (1<<7) /* Bit 7: Enable Timeout Timer TX1 */ -#define PA_DIS_TO_TX1 (1<<6) /* Bit 6: Disable Timeout Timer TX1 */ -#define PA_ENA_TO_RX2 (1<<5) /* Bit 5: Enable Timeout Timer RX2 */ -#define PA_DIS_TO_RX2 (1<<4) /* Bit 4: Disable Timeout Timer RX2 */ -#define PA_ENA_TO_RX1 (1<<3) /* Bit 3: Enable Timeout Timer RX1 */ -#define PA_DIS_TO_RX1 (1<<2) /* Bit 2: Disable Timeout Timer RX1 */ -#define PA_RST_CLR (1<<1) /* Bit 1: Clear MAC Arbiter Reset */ -#define PA_RST_SET (1<<0) /* Bit 0: Set MAC Arbiter Reset */ +#define PA_CLR_TO_TX2 BIT_13S /* Clear IRQ Packet Timeout TX2 */ +#define PA_CLR_TO_TX1 BIT_12S /* Clear IRQ Packet Timeout TX1 */ +#define PA_CLR_TO_RX2 BIT_11S /* Clear IRQ Packet Timeout RX2 */ +#define PA_CLR_TO_RX1 BIT_10S /* Clear IRQ Packet Timeout RX1 */ +#define PA_ENA_TO_TX2 BIT_9S /* Enable Timeout Timer TX2 */ +#define PA_DIS_TO_TX2 BIT_8S /* Disable Timeout Timer TX2 */ +#define PA_ENA_TO_TX1 BIT_7S /* Enable Timeout Timer TX1 */ +#define PA_DIS_TO_TX1 BIT_6S /* Disable Timeout Timer TX1 */ +#define PA_ENA_TO_RX2 BIT_5S /* Enable Timeout Timer RX2 */ +#define PA_DIS_TO_RX2 BIT_4S /* Disable Timeout Timer RX2 */ +#define PA_ENA_TO_RX1 BIT_3S /* Enable Timeout Timer RX1 */ +#define PA_DIS_TO_RX1 BIT_2S /* Disable Timeout Timer RX1 */ +#define PA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */ +#define PA_RST_SET BIT_0S /* Set MAC Arbiter Reset */ #define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ PA_ENA_TO_TX1 | PA_ENA_TO_TX2) @@ -1035,57 +1303,57 @@ /* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */ /* B3_PA_TEST 16 bit Packet Arbiter Test Register */ /* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */ -#define TX2_T_EV (1<<15) /* Bit 15: TX2 Timeout/Recv Event occured*/ -#define TX2_T_ON (1<<14) /* Bit 14: TX2 Timeout/Recv Timer Test On*/ -#define TX2_T_OFF (1<<13) /* Bit 13: TX2 Timeout/Recv Timer Tst Off*/ -#define TX2_T_STEP (1<<12) /* Bit 12: TX2 Timeout/Recv Timer Step */ -#define TX1_T_EV (1<<11) /* Bit 11: TX1 Timeout/Recv Event occured*/ -#define TX1_T_ON (1<<10) /* Bit 10: TX1 Timeout/Recv Timer Test On*/ -#define TX1_T_OFF (1<<9) /* Bit 9: TX1 Timeout/Recv Timer Tst Off*/ -#define TX1_T_STEP (1<<8) /* Bit 8: TX1 Timeout/Recv Timer Step */ -#define RX2_T_EV (1<<7) /* Bit 7: RX2 Timeout/Recv Event occured*/ -#define RX2_T_ON (1<<6) /* Bit 6: RX2 Timeout/Recv Timer Test On*/ -#define RX2_T_OFF (1<<5) /* Bit 5: RX2 Timeout/Recv Timer Tst Off*/ -#define RX2_T_STEP (1<<4) /* Bit 4: RX2 
Timeout/Recv Timer Step */ -#define RX1_T_EV (1<<3) /* Bit 3: RX1 Timeout/Recv Event occured*/ -#define RX1_T_ON (1<<2) /* Bit 2: RX1 Timeout/Recv Timer Test On*/ -#define RX1_T_OFF (1<<1) /* Bit 1: RX1 Timeout/Recv Timer Tst Off*/ -#define RX1_T_STEP (1<<0) /* Bit 0: RX1 Timeout/Recv Timer Step */ +#define TX2_T_EV BIT_15S /* TX2 Timeout/Recv Event occurred */ +#define TX2_T_ON BIT_14S /* TX2 Timeout/Recv Timer Test On */ +#define TX2_T_OFF BIT_13S /* TX2 Timeout/Recv Timer Tst Off */ +#define TX2_T_STEP BIT_12S /* TX2 Timeout/Recv Timer Step */ +#define TX1_T_EV BIT_11S /* TX1 Timeout/Recv Event occurred */ +#define TX1_T_ON BIT_10S /* TX1 Timeout/Recv Timer Test On */ +#define TX1_T_OFF BIT_9S /* TX1 Timeout/Recv Timer Tst Off */ +#define TX1_T_STEP BIT_8S /* TX1 Timeout/Recv Timer Step */ +#define RX2_T_EV BIT_7S /* RX2 Timeout/Recv Event occurred */ +#define RX2_T_ON BIT_6S /* RX2 Timeout/Recv Timer Test On */ +#define RX2_T_OFF BIT_5S /* RX2 Timeout/Recv Timer Tst Off */ +#define RX2_T_STEP BIT_4S /* RX2 Timeout/Recv Timer Step */ +#define RX1_T_EV BIT_3S /* RX1 Timeout/Recv Event occurred */ +#define RX1_T_ON BIT_2S /* RX1 Timeout/Recv Timer Test On */ +#define RX1_T_OFF BIT_1S /* RX1 Timeout/Recv Timer Tst Off */ +#define RX1_T_STEP BIT_0S /* RX1 Timeout/Recv Timer Step */ -/* Transmit Arbiter Registers MAC 1 and 2, user MR_ADDR() to address */ +/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ /* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ /* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ /* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ /* Bit 31..24: reserved */ -#define TXA_MAX_VAL 0x00ffffffL /* Bit 23.. 0: Max TXA Timer/Cnt Val */ +#define TXA_MAX_VAL 0x00ffffffL /* Bit 23.. 0: Max TXA Timer/Cnt Val */ /* TXA_CTRL 8 bit Tx Arbiter Control Register */ -#define TXA_ENA_FSYNC (1<<7) /* Bit 7: Enable force of sync tx queue */ -#define TXA_DIS_FSYNC (1<<6) /* Bit 6: Disable force of sync tx queue*/ -#define TXA_ENA_ALLOC (1<<5) /* Bit 5: Enable alloc of free bandwidth*/ -#define TXA_DIS_ALLOC (1<<4) /* Bit 4: Disabl alloc of free bandwidth*/ -#define TXA_START_RC (1<<3) /* Bit 3: Start sync Rate Control */ -#define TXA_STOP_RC (1<<2) /* Bit 2: Stop sync Rate Control */ -#define TXA_ENA_ARB (1<<1) /* Bit 1: Enable Tx Arbiter */ -#define TXA_DIS_ARB (1<<0) /* Bit 0: Disable Tx Arbiter */ +#define TXA_ENA_FSYNC BIT_7S /* Enable force of sync Tx queue */ +#define TXA_DIS_FSYNC BIT_6S /* Disable force of sync Tx queue */ +#define TXA_ENA_ALLOC BIT_5S /* Enable alloc of free bandwidth */ +#define TXA_DIS_ALLOC BIT_4S /* Disable alloc of free bandwidth */ +#define TXA_START_RC BIT_3S /* Start sync Rate Control */ +#define TXA_STOP_RC BIT_2S /* Stop sync Rate Control */ +#define TXA_ENA_ARB BIT_1S /* Enable Tx Arbiter */ +#define TXA_DIS_ARB BIT_0S /* Disable Tx Arbiter */ /* TXA_TEST 8 bit Tx Arbiter Test Register */ - /* Bit 7.. 
6: reserved */ +#define TXA_INT_T_ON BIT_5S /* Tx Arb Interval Timer Test On */ +#define TXA_INT_T_OFF BIT_4S /* Tx Arb Interval Timer Test Off */ +#define TXA_INT_T_STEP BIT_3S /* Tx Arb Interval Timer Step */ +#define TXA_LIM_T_ON BIT_2S /* Tx Arb Limit Timer Test On */ +#define TXA_LIM_T_OFF BIT_1S /* Tx Arb Limit Timer Test Off */ +#define TXA_LIM_T_STEP BIT_0S /* Tx Arb Limit Timer Step */ /* TXA_STAT 8 bit Tx Arbiter Status Register */ - /* Bit 7..1: reserved */ -#define TXA_PRIO_XS (1<<0) /* Bit 0: sync queue has prio to send */ + /* Bit 7.. 1: reserved */ +#define TXA_PRIO_XS BIT_0S /* sync queue has prio to send */ /* Q_BC 32 bit Current Byte Counter */ - /* Bit 31..16: reserved */ + /* Bit 31..16: reserved */ #define BC_MAX 0xffff /* Bit 15.. 0: Byte counter */ /* BMU Control Status Registers */ @@ -1095,94 +1363,95 @@ /* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ /* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ /* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ -/* Q_CSR 32 bit BMU Control/Status Register */ - /* Bit 31..25: reserved */ -#define CSR_SV_IDLE (1L<<24) /* Bit 24: BMU SM Idle */ - /* Bit 23..22: reserved */ -#define CSR_DESC_CLR (1L<<21) /* Bit 21: Clear Reset for Descr */ -#define CSR_DESC_SET (1L<<20) /* Bit 20: Set Reset for Descr */ -#define CSR_FIFO_CLR (1L<<19) /* Bit 19: Clear Reset for FIFO */ -#define CSR_FIFO_SET (1L<<18) /* Bit 18: Set Reset for FIFO */ -#define CSR_HPI_RUN (1L<<17) /* Bit 17: Release HPI SM */ -#define CSR_HPI_RST (1L<<16) /* Bit 16: Reset HPI SM to Idle */ -#define CSR_SV_RUN (1L<<15) /* Bit 15: Release Supervisor SM */ -#define CSR_SV_RST (1L<<14) /* Bit 14: Reset Supervisor SM */ -#define CSR_DREAD_RUN (1L<<13) /* Bit 13: Release Descr Read SM */ -#define CSR_DREAD_RST (1L<<12) /* Bit 12: Reset Descr Read SM */ -#define CSR_DWRITE_RUN (1L<<11) /* Bit 11: Rel. 
Descr Write SM */ -#define CSR_DWRITE_RST (1L<<10) /* Bit 10: Reset Descr Write SM */ -#define CSR_TRANS_RUN (1L<<9) /* Bit 9: Release Transfer SM */ -#define CSR_TRANS_RST (1L<<8) /* Bit 8: Reset Transfer SM */ -#define CSR_ENA_POL (1L<<7) /* Bit 7: Enable Descr Polling */ -#define CSR_DIS_POL (1L<<6) /* Bit 6: Disable Descr Polling */ -#define CSR_STOP (1L<<5) /* Bit 5: Stop Rx/Tx Queue */ -#define CSR_START (1L<<4) /* Bit 4: Start Rx/Tx Queue */ -#define CSR_IRQ_CL_P (1L<<3) /* Bit 3: (Rx) Clear Parity IRQ */ -#define CSR_IRQ_CL_B (1L<<2) /* Bit 2: Clear EOB IRQ */ -#define CSR_IRQ_CL_F (1L<<1) /* Bit 1: Clear EOF IRQ */ -#define CSR_IRQ_CL_C (1L<<0) /* Bit 0: Clear ERR IRQ */ - -#define CSR_SET_RESET (CSR_DESC_SET|CSR_FIFO_SET|CSR_HPI_RST|CSR_SV_RST|\ - CSR_DREAD_RST|CSR_DWRITE_RST|CSR_TRANS_RST) -#define CSR_CLR_RESET (CSR_DESC_CLR|CSR_FIFO_CLR|CSR_HPI_RUN|CSR_SV_RUN|\ - CSR_DREAD_RUN|CSR_DWRITE_RUN|CSR_TRANS_RUN) - +/* Q_CSR 32 bit BMU Control/Status Register */ + /* Bit 31..25: reserved */ +#define CSR_SV_IDLE BIT_24 /* BMU SM Idle */ + /* Bit 23..22: reserved */ +#define CSR_DESC_CLR BIT_21 /* Clear Reset for Descr */ +#define CSR_DESC_SET BIT_20 /* Set Reset for Descr */ +#define CSR_FIFO_CLR BIT_19 /* Clear Reset for FIFO */ +#define CSR_FIFO_SET BIT_18 /* Set Reset for FIFO */ +#define CSR_HPI_RUN BIT_17 /* Release HPI SM */ +#define CSR_HPI_RST BIT_16 /* Reset HPI SM to Idle */ +#define CSR_SV_RUN BIT_15 /* Release Supervisor SM */ +#define CSR_SV_RST BIT_14 /* Reset Supervisor SM */ +#define CSR_DREAD_RUN BIT_13 /* Release Descr Read SM */ +#define CSR_DREAD_RST BIT_12 /* Reset Descr Read SM */ +#define CSR_DWRITE_RUN BIT_11 /* Release Descr Write SM */ +#define CSR_DWRITE_RST BIT_10 /* Reset Descr Write SM */ +#define CSR_TRANS_RUN BIT_9 /* Release Transfer SM */ +#define CSR_TRANS_RST BIT_8 /* Reset Transfer SM */ +#define CSR_ENA_POL BIT_7 /* Enable Descr Polling */ +#define CSR_DIS_POL BIT_6 /* Disable Descr Polling */ +#define CSR_STOP BIT_5 /* Stop Rx/Tx Queue */ +#define CSR_START BIT_4 /* Start Rx/Tx Queue */ +#define CSR_IRQ_CL_P BIT_3 /* (Rx) Clear Parity IRQ */ +#define CSR_IRQ_CL_B BIT_2 /* Clear EOB IRQ */ +#define CSR_IRQ_CL_F BIT_1 /* Clear EOF IRQ */ +#define CSR_IRQ_CL_C BIT_0 /* Clear ERR IRQ */ + +#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\ + CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\ + CSR_TRANS_RST) +#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\ + CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\ + CSR_TRANS_RUN) /* Q_F 32 bit Flag Register */ - /* Bit 28..31: reserved */ -#define F_ALM_FULL (1L<<27) (Rx) /* Bit 27: (Rx) FIFO almost full */ -#define F_EMPTY (1L<<27) (Tx) /* Bit 27: (Tx) FIFO empty flag */ -#define F_FIFO_EOF (1L<<26) /* Bit 26: Fag bit in FIFO */ -#define F_WM_REACHED (1L<<25) /* Bit 25: Watermark reached */ - /* Bit 24: reserved */ + /* Bit 31..28: reserved */ +#define F_ALM_FULL BIT_27 /* Rx FIFO: almost full */ +#define F_EMPTY BIT_27 /* Tx FIFO: empty flag */ +#define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */ +#define F_WM_REACHED BIT_25 /* Watermark reached */ + /* reserved */ #define F_FIFO_LEVEL (0x1fL<<16) /* Bit 23..16: # of Qwords in FIFO */ /* Bit 15..11: reserved */ #define F_WATER_MARK 0x0007ffL /* Bit 10.. 
0: Watermark */ /* Q_T1 32 bit Test Register 1 */ /* Holds four State Machine control Bytes */ -#define SM_CRTL_SV (0xffL<<24) /* Bit 31..24: Control Supervisor SM */ -#define SM_CRTL_RD (0xffL<<16) /* Bit 23..16: Control Read Desc SM */ -#define SM_CRTL_WR (0xffL<<8) /* Bit 15.. 8: Control Write Desc SM */ -#define SM_CRTL_TR (0xffL<<0) /* Bit 7.. 0: Control Transfer SM */ +#define SM_CRTL_SV_MSK (0xffL<<24) /* Bit 31..24: Control Supervisor SM */ +#define SM_CRTL_RD_MSK (0xffL<<16) /* Bit 23..16: Control Read Desc SM */ +#define SM_CRTL_WR_MSK (0xffL<<8) /* Bit 15.. 8: Control Write Desc SM */ +#define SM_CRTL_TR_MSK 0xffL /* Bit 7.. 0: Control Transfer SM */ /* Q_T1_TR 8 bit Test Register 1 Transfer SM */ /* Q_T1_WR 8 bit Test Register 1 Write Descriptor SM */ /* Q_T1_RD 8 bit Test Register 1 Read Descriptor SM */ /* Q_T1_SV 8 bit Test Register 1 Supervisor SM */ + /* The control status byte of each machine looks like ... */ -#define SM_STATE 0xf0 /* Bit 7..4: State which shall be loaded */ -#define SM_LOAD (1<<3) /* Bit 3: Load the SM with SM_STATE */ -#define SM_TEST_ON (1<<2) /* Bit 2: Switch on SM Test Mode */ -#define SM_TEST_OFF (1<<1) /* Bit 1: Go off the Test Mode */ -#define SM_STEP (1<<0) /* Bit 0: Step the State Machine */ +#define SM_STATE 0xf0 /* Bit 7.. 4: State which shall be loaded */ +#define SM_LOAD BIT_3S /* Load the SM with SM_STATE */ +#define SM_TEST_ON BIT_2S /* Switch on SM Test Mode */ +#define SM_TEST_OFF BIT_1S /* Go off the Test Mode */ +#define SM_STEP BIT_0S /* Step the State Machine */ /* The encoding of the states is not supported by the Diagnostics Tool */ /* Q_T2 32 bit Test Register 2 */ - /* Bit 31..8: reserved */ -#define T2_AC_T_ON (1<<7) /* Bit 7: Address Counter Test Mode on */ -#define T2_AC_T_OFF (1<<6) /* Bit 6: Address Counter Test Mode off*/ -#define T2_BC_T_ON (1<<5) /* Bit 5: Byte Counter Test Mode on */ -#define T2_BC_T_OFF (1<<4) /* Bit 4: Byte Counter Test Mode off */ -#define T2_STEP04 (1<<3) /* Bit 3: Inc AC/Dec BC by 4 */ -#define T2_STEP03 (1<<2) /* Bit 2: Inc AC/Dec BC by 3 */ -#define T2_STEP02 (1<<1) /* Bit 1: Inc AC/Dec BC by 2 */ -#define T2_STEP01 (1<<0) /* Bit 0: Inc AC/Dec BC by 1 */ + /* Bit 31.. 8: reserved */ +#define T2_AC_T_ON BIT_7 /* Address Counter Test Mode on */ +#define T2_AC_T_OFF BIT_6 /* Address Counter Test Mode off */ +#define T2_BC_T_ON BIT_5 /* Byte Counter Test Mode on */ +#define T2_BC_T_OFF BIT_4 /* Byte Counter Test Mode off */ +#define T2_STEP04 BIT_3 /* Inc AC/Dec BC by 4 */ +#define T2_STEP03 BIT_2 /* Inc AC/Dec BC by 3 */ +#define T2_STEP02 BIT_1 /* Inc AC/Dec BC by 2 */ +#define T2_STEP01 BIT_0 /* Inc AC/Dec BC by 1 */ /* Q_T3 32 bit Test Register 3 */ - /* Bit 31..7: reserved */ -#define T3_MUX (7<<4) /* Bit 6.. 4: Mux Position */ - /* Bit 3: reserved */ -#define T3_VRAM (7<<0) /* Bit 2.. 0: Virtual RAM Buffer Address */ + /* Bit 31.. 7: reserved */ +#define T3_MUX_MSK (7<<4) /* Bit 6.. 4: Mux Position */ + /* Bit 3: reserved */ +#define T3_VRAM_MSK 7 /* Bit 2.. 
0: Virtual RAM Buffer Address */ -/* RAM Buffer Register Offsets */ -/* use RB_ADDR(Queue,Offs) to address */ +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ /* RB_START 32 bit RAM Buffer Start Address */ /* RB_END 32 bit RAM Buffer End Address */ /* RB_WP 32 bit RAM Buffer Write Pointer */ /* RB_RP 32 bit RAM Buffer Read Pointer */ /* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ -/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pasue Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ /* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ /* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ /* RB_PC 32 bit RAM Buffer Packet Counter */ @@ -1190,144 +1459,325 @@ /* Bit 31..19: reserved */ #define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ -/* RB_TST2 8 bit RAM Buffer Test Register 2 */ - /* Bit 4..7: reserved */ -#define RB_PC_DEC (1<<3) /* Bit 3: Packet Counter Decrem */ -#define RB_PC_T_ON (1<<2) /* Bit 2: Packet Counter Test On */ -#define RB_PC_T_OFF (1<<1) /* Bit 1: Packet Counter Tst Off */ -#define RB_PC_INC (1<<0) /* Bit 0: Packet Counter Increm */ - -/* RB_TST1 8 bit RAM Buffer Test Register 1 */ - /* Bit 7: reserved */ -#define RB_WP_T_ON (1<<6) /* Bit 6: Write Pointer Test On */ -#define RB_WP_T_OFF (1<<5) /* Bit 5: Write Pointer Test Off */ -#define RB_WP_INC (1<<4) /* Bit 4: Write Pointer Increm */ - /* Bit 3: reserved */ -#define RB_RP_T_ON (1<<2) /* Bit 2: Read Pointer Test On */ -#define RB_RP_T_OFF (1<<1) /* Bit 1: Read Pointer Test Off */ -#define RB_RP_DEC (1<<0) /* Bit 0: Read Pointer Decrement */ - -/* RB_CTRL 8 bit RAM Buffer Control Register */ - /* Bit 7..6: reserved */ -#define RB_ENA_STFWD (1<<5) /* Bit 5: Enable Store & Forward */ -#define RB_DIS_STFWD (1<<4) /* Bit 4: Disab. Store & Forward */ -#define RB_ENA_OP_MD (1<<3) /* Bit 3: Enable Operation Mode */ -#define RB_DIS_OP_MD (1<<2) /* Bit 2: Disab. Operation Mode */ -#define RB_RST_CLR (1<<1) /* Bit 1: Clr RAM Buf STM Reset */ -#define RB_RST_SET (1<<0) /* Bit 0: Set RAM Buf STM Reset */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ + /* Bit 7.. 4: reserved */ +#define RB_PC_DEC BIT_3S /* Packet Counter Decrem */ +#define RB_PC_T_ON BIT_2S /* Packet Counter Test On */ +#define RB_PC_T_OFF BIT_1S /* Packet Counter Tst Off */ +#define RB_PC_INC BIT_0S /* Packet Counter Increm */ + +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + /* Bit 7: reserved */ +#define RB_WP_T_ON BIT_6S /* Write Pointer Test On */ +#define RB_WP_T_OFF BIT_5S /* Write Pointer Test Off */ +#define RB_WP_INC BIT_4S /* Write Pointer Increm */ + /* Bit 3: reserved */ +#define RB_RP_T_ON BIT_2S /* Read Pointer Test On */ +#define RB_RP_T_OFF BIT_1S /* Read Pointer Test Off */ +#define RB_RP_DEC BIT_0S /* Read Pointer Decrement */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ + /* Bit 7.. 
6: reserved */ +#define RB_ENA_STFWD BIT_5S /* Enable Store & Forward */ +#define RB_DIS_STFWD BIT_4S /* Disable Store & Forward */ +#define RB_ENA_OP_MD BIT_3S /* Enable Operation Mode */ +#define RB_DIS_OP_MD BIT_2S /* Disable Operation Mode */ +#define RB_RST_CLR BIT_1S /* Clear RAM Buf STM Reset */ +#define RB_RST_SET BIT_0S /* Set RAM Buf STM Reset */ + +/* Receive and Transmit MAC FIFO Registers (GENESIS only) */ -/* Receive and Transmit MAC FIFO Registers, use MR_ADDR() to address */ /* RX_MFF_EA 32 bit Receive MAC FIFO End Address */ /* RX_MFF_WP 32 bit Receive MAC FIFO Write Pointer */ /* RX_MFF_RP 32 bit Receive MAC FIFO Read Pointer */ -/* RX_MFF_PC 32 bit Receive MAC FIFO Packet Counter*/ +/* RX_MFF_PC 32 bit Receive MAC FIFO Packet Counter */ /* RX_MFF_LEV 32 bit Receive MAC FIFO Level */ /* TX_MFF_EA 32 bit Transmit MAC FIFO End Address */ -/* TX_MFF_WP 32 bit Transmit MAC FIFO Write Pointer*/ -/* TX_MFF_WSP 32 bit Transmit MAC FIFO WR Shadow Pt*/ +/* TX_MFF_WP 32 bit Transmit MAC FIFO Write Pointer */ +/* TX_MFF_WSP 32 bit Transmit MAC FIFO WR Shadow Pointer */ /* TX_MFF_RP 32 bit Transmit MAC FIFO Read Pointer */ /* TX_MFF_PC 32 bit Transmit MAC FIFO Packet Cnt */ /* TX_MFF_LEV 32 bit Transmit MAC FIFO Level */ - /* Bit 31..6: reserved */ -#define MFF_MSK 0x007fL /* Bit 5..0: MAC FIFO Address/Pointer Bits */ + /* Bit 31.. 6: reserved */ +#define MFF_MSK 0x007fL /* Bit 5.. 0: MAC FIFO Address/Ptr Bits */ /* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */ - /* Bit 15..14: reserved */ -#define MFF_ENA_RDY_PAT (1<<13) /* Bit 13: Enable Ready Patch */ -#define MFF_DIS_RDY_PAT (1<<12) /* Bit 12: Disable Ready Patch */ -#define MFF_ENA_TIM_PAT (1<<11) /* Bit 11: Enable Timing Patch */ -#define MFF_DIS_TIM_PAT (1<<10) /* Bit 10: Disable Timing Patch */ -#define MFF_ENA_ALM_FUL (1<<9) /* Bit 9: Enable AlmostFull Sign*/ -#define MFF_DIS_ALM_FUL (1<<8) /* Bit 8: Disab. AlmostFull Sign*/ -#define MFF_ENA_PAUSE (1<<7) /* Bit 7: Enable Pause Signaling*/ -#define MFF_DIS_PAUSE (1<<6) /* Bit 6: Disab. Pause Signaling*/ -#define MFF_ENA_FLUSH (1<<5) /* Bit 5: Enable Frame Flushing */ -#define MFF_DIS_FLUSH (1<<4) /* Bit 4: Disab. Frame Flushing */ -#define MFF_ENA_TIST (1<<3) /* Bit 3: Enable Timestamp Gener*/ -#define MFF_DIS_TIST (1<<2) /* Bit 2: Disab. 
Timestamp Gener*/ -#define MFF_CLR_INTIST (1<<1) /* Bit 1: Clear IRQ No Timestamp*/ -#define MFF_CLR_INSTAT (1<<0) /* Bit 0: Clear IRQ No Status */ + /* Bit 15..14: reserved */ +#define MFF_ENA_RDY_PAT BIT_13S /* Enable Ready Patch */ +#define MFF_DIS_RDY_PAT BIT_12S /* Disable Ready Patch */ +#define MFF_ENA_TIM_PAT BIT_11S /* Enable Timing Patch */ +#define MFF_DIS_TIM_PAT BIT_10S /* Disable Timing Patch */ +#define MFF_ENA_ALM_FUL BIT_9S /* Enable AlmostFull Sign */ +#define MFF_DIS_ALM_FUL BIT_8S /* Disable AlmostFull Sign */ +#define MFF_ENA_PAUSE BIT_7S /* Enable Pause Signaling */ +#define MFF_DIS_PAUSE BIT_6S /* Disable Pause Signaling */ +#define MFF_ENA_FLUSH BIT_5S /* Enable Frame Flushing */ +#define MFF_DIS_FLUSH BIT_4S /* Disable Frame Flushing */ +#define MFF_ENA_TIST BIT_3S /* Enable Time Stamp Gener */ +#define MFF_DIS_TIST BIT_2S /* Disable Time Stamp Gener */ +#define MFF_CLR_INTIST BIT_1S /* Clear IRQ No Time Stamp */ +#define MFF_CLR_INSTAT BIT_0S /* Clear IRQ No Status */ #define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT /* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */ -#define MFF_CLR_PERR (1<<15) /* Bit 15: Clear Parity Error IRQ*/ - /* Bit 14: reserved */ -#define MFF_ENA_PKT_REC (1<<13) /* Bit 13: Enable Packet Recovery*/ -#define MFF_DIS_PKT_REC (1<<12) /* Bit 12: Disable Packet Recov. */ -/* MFF_ENA_TIM_PAT (see RX_MFF_CTRL1)Bit 11: Enable Timing Patch */ -/* MFF_DIS_TIM_PAT (see RX_MFF_CTRL1)Bit 10: Disable Timing Patch */ -/* MFF_ENA_ALM_FUL (see RX_MFF_CTRL1)Bit 9: Enable AlmostFull Sign*/ -/* MFF_DIS_ALM_FUL (see RX_MFF_CTRL1)Bit 8: Disab. AlmostFull Sign*/ -#define MFF_ENA_W4E (1<<7) /* Bit 7: Enable Wait for Empty */ -#define MFF_DIS_W4E (1<<6) /* Bit 6: Disab. Wait for Empty */ -/* MFF_ENA_FLUSH (see RX_MFF_CTRL1)Bit 5: Enable Frame Flushing */ -/* MFF_DIS_FLUSH (see RX_MFF_CTRL1)Bit 4: Disab. 
Frame Flushing */ -#define MFF_ENA_LOOPB (1<<3) /* Bit 3: Enable Loopback */ -#define MFF_DIS_LOOPB (1<<2) /* Bit 2: Disable Loopback */ -#define MFF_CLR_MAC_RST (1<<1) /* Bit 1: Clear XMAC Reset */ -#define MFF_SET_MAC_RST (1<<0) /* Bit 0: Set XMAC Reset */ +#define MFF_CLR_PERR BIT_15S /* Clear Parity Error IRQ */ + /* Bit 14: reserved */ +#define MFF_ENA_PKT_REC BIT_13S /* Enable Packet Recovery */ +#define MFF_DIS_PKT_REC BIT_12S /* Disable Packet Recovery */ +/* MFF_ENA_TIM_PAT (see RX_MFF_CTRL1) Bit 11: Enable Timing Patch */ +/* MFF_DIS_TIM_PAT (see RX_MFF_CTRL1) Bit 10: Disable Timing Patch */ +/* MFF_ENA_ALM_FUL (see RX_MFF_CTRL1) Bit 9: Enable Almost Full Sign */ +/* MFF_DIS_ALM_FUL (see RX_MFF_CTRL1) Bit 8: Disable Almost Full Sign */ +#define MFF_ENA_W4E BIT_7S /* Enable Wait for Empty */ +#define MFF_DIS_W4E BIT_6S /* Disable Wait for Empty */ +/* MFF_ENA_FLUSH (see RX_MFF_CTRL1) Bit 5: Enable Frame Flushing */ +/* MFF_DIS_FLUSH (see RX_MFF_CTRL1) Bit 4: Disable Frame Flushing */ +#define MFF_ENA_LOOPB BIT_3S /* Enable Loopback */ +#define MFF_DIS_LOOPB BIT_2S /* Disable Loopback */ +#define MFF_CLR_MAC_RST BIT_1S /* Clear XMAC Reset */ +#define MFF_SET_MAC_RST BIT_0S /* Set XMAC Reset */ #define MFF_TX_CTRL_DEF (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH) -/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ -/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ /* Bit 7: reserved */ -#define MFF_WSP_T_ON (1<<6) /* Bit 6: (Tx) Write Shadow Pt TestOn */ -#define MFF_WSP_T_OFF (1<<5) /* Bit 5: (Tx) Write Shadow Pt TstOff */ -#define MFF_WSP_INC (1<<4) /* Bit 4: (Tx) Write Shadow Pt Increm */ -#define MFF_PC_DEC (1<<3) /* Bit 3: Packet Counter Decrem */ -#define MFF_PC_T_ON (1<<2) /* Bit 2: Packet Counter Test On */ -#define MFF_PC_T_OFF (1<<1) /* Bit 1: Packet Counter Tst Off */ -#define MFF_PC_INC (1<<0) /* Bit 0: Packet Counter Increm */ +#define MFF_WSP_T_ON BIT_6S /* Tx: Write Shadow Ptr TestOn */ +#define MFF_WSP_T_OFF BIT_5S /* Tx: Write Shadow Ptr TstOff */ +#define MFF_WSP_INC BIT_4S /* Tx: Write Shadow Ptr Increment */ +#define MFF_PC_DEC BIT_3S /* Packet Counter Decrement */ +#define MFF_PC_T_ON BIT_2S /* Packet Counter Test On */ +#define MFF_PC_T_OFF BIT_1S /* Packet Counter Test Off */ +#define MFF_PC_INC BIT_0S /* Packet Counter Increment */ -/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ -/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ /* Bit 7: reserved */ -#define MFF_WP_T_ON (1<<6) /* Bit 6: Write Pointer Test On */ -#define MFF_WP_T_OFF (1<<5) /* Bit 5: Write Pointer Test Off */ -#define MFF_WP_INC (1<<4) /* Bit 4: Write Pointer Increm */ - /* Bit 3: reserved */ -#define MFF_RP_T_ON (1<<2) /* Bit 2: Read Pointer Test On */ -#define MFF_RP_T_OFF (1<<1) /* Bit 1: Read Pointer Test Off */ -#define MFF_RP_DEC (1<<0) /* Bit 0: Read Pointer Decrement */ +#define MFF_WP_T_ON BIT_6S /* Write Pointer Test On */ +#define MFF_WP_T_OFF BIT_5S /* Write Pointer Test Off */ +#define MFF_WP_INC BIT_4S /* Write Pointer Increm */ + /* Bit 3: reserved */ +#define MFF_RP_T_ON BIT_2S /* Read Pointer Test On */ +#define MFF_RP_T_OFF BIT_1S /* Read Pointer Test Off */ +#define MFF_RP_DEC BIT_0S /* Read Pointer Decrement */ /* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ /* TX_MFF_CTRL2 8 bit Transmit MAC FIFO 
Control Reg 2 */ /* Bit 7..4: reserved */ -#define MFF_ENA_OP_MD (1<<3) /* Bit 3: Enable Operation Mode */ -#define MFF_DIS_OP_MD (1<<2) /* Bit 2: Disab. Operation Mode */ -#define MFF_RST_CLR (1<<1) /* Bit 1: Clear MAC FIFO Reset */ -#define MFF_RST_SET (1<<0) /* Bit 0: Set MAC FIFO Reset */ - - -/* Receive, Transmit, and Link LED Counter Registers */ -/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ -/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ -/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ - /* Bit 7..3: reserved */ -#define LED_START (1<<2) /* Bit 2: Start Timer */ -#define LED_STOP (1<<1) /* Bit 1: Stop Timer */ -#define LED_STATE (1<<0) /* Bit 0:(Rx/Tx)LED State, 1=LED on */ -#define LED_CLR_IRQ (1<<0) /* Bit 0:(Lnk) Clear Link IRQ */ - -/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ -/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ -/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ - /* Bit 7..3: reserved */ -#define LED_T_ON (1<<2) /* Bit 2: LED Counter Testmode On */ -#define LED_T_OFF (1<<1) /* Bit 1: LED Counter Testmode Off */ -#define LED_T_STEP (1<<0) /* Bit 0: LED Counter Step */ - -/* LNK_LED_REG 8 bit Link LED Register */ - /* Bit 7..6: reserved */ -#define LED_BLK_ON (1<<5) /* Bit 5: Link LED Blinking On */ -#define LED_BLK_OFF (1<<4) /* Bit 4: Link LED Blinking Off */ -#define LED_SYNC_ON (1<<3) /* Bit 3: Use Sync Wire to switch LED */ -#define LED_SYNC_OFF (1<<2) /* Bit 2: Disable Sync Wire Input */ -#define LED_ON (1<<1) /* Bit 1: switch LED on */ -#define LED_OFF (1<<0) /* Bit 0: switch LED off */ +#define MFF_ENA_OP_MD BIT_3S /* Enable Operation Mode */ +#define MFF_DIS_OP_MD BIT_2S /* Disable Operation Mode */ +#define MFF_RST_CLR BIT_1S /* Clear MAC FIFO Reset */ +#define MFF_RST_SET BIT_0S /* Set MAC FIFO Reset */ + + +/* Link LED Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ + /* Bit 7.. 3: reserved */ +#define LED_START BIT_2S /* Start Timer */ +#define LED_STOP BIT_1S /* Stop Timer */ +#define LED_STATE BIT_0S /* Rx/Tx: LED State, 1=LED on */ +#define LED_CLR_IRQ BIT_0S /* Lnk: Clear Link IRQ */ + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ + /* Bit 7.. 3: reserved */ +#define LED_T_ON BIT_2S /* LED Counter Test mode On */ +#define LED_T_OFF BIT_1S /* LED Counter Test mode Off */ +#define LED_T_STEP BIT_0S /* LED Counter Step */ + +/* LNK_LED_REG 8 bit Link LED Register */ + /* Bit 7.. 6: reserved */ +#define LED_BLK_ON BIT_5S /* Link LED Blinking On */ +#define LED_BLK_OFF BIT_4S /* Link LED Blinking Off */ +#define LED_SYNC_ON BIT_3S /* Use Sync Wire to switch LED */ +#define LED_SYNC_OFF BIT_2S /* Disable Sync Wire Input */ +#define LED_ON BIT_1S /* switch LED on */ +#define LED_OFF BIT_0S /* switch LED off */ + +/* Receive and Transmit GMAC FIFO Registers (YUKON only) */ + +/* RX_GMF_EA 32 bit Rx GMAC FIFO End Address */ +/* RX_GMF_AF_THR 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ +/* RX_GMF_WP 32 bit Rx GMAC FIFO Write Pointer */ +/* RX_GMF_WLEV 32 bit Rx GMAC FIFO Write Level */ +/* RX_GMF_RP 32 bit Rx GMAC FIFO Read Pointer */ +/* RX_GMF_RLEV 32 bit Rx GMAC FIFO Read Level */ +/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ +/* TX_GMF_AE_THR 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ +/* TX_GMF_WP 32 bit Tx GMAC FIFO Write Pointer */ +/* TX_GMF_WSP 32 bit Tx GMAC FIFO Write Shadow Ptr. */ +/* TX_GMF_WLEV 32 bit Tx GMAC FIFO Write Level */ +/* TX_GMF_RP 32 bit Tx GMAC FIFO Read Pointer */ +/* TX_GMF_RSTP 32 bit Tx GMAC FIFO Restart Pointer */ +/* TX_GMF_RLEV 32 bit Tx GMAC FIFO Read Level */ + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ + /* Bits 31..15: reserved */ +#define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */ +#define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */ +#define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */ + /* Bit 11: reserved */ +#define GMF_RP_TST_ON BIT_10 /* Read Pointer Test On */ +#define GMF_RP_TST_OFF BIT_9 /* Read Pointer Test Off */ +#define GMF_RP_STEP BIT_8 /* Read Pointer Step/Increment */ +#define GMF_RX_F_FL_ON BIT_7 /* Rx FIFO Flush Mode On */ +#define GMF_RX_F_FL_OFF BIT_6 /* Rx FIFO Flush Mode Off */ +#define GMF_CLI_RX_FO BIT_5 /* Clear IRQ Rx FIFO Overrun */ +#define GMF_CLI_RX_FC BIT_4 /* Clear IRQ Rx Frame Complete */ +#define GMF_OPER_ON BIT_3 /* Operational Mode On */ +#define GMF_OPER_OFF BIT_2 /* Operational Mode Off */ +#define GMF_RST_CLR BIT_1 /* Clear GMAC FIFO Reset */ +#define GMF_RST_SET BIT_0 /* Set GMAC FIFO Reset */ + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ + /* Bits 31..19: reserved */ +#define GMF_WSP_TST_ON BIT_18 /* Write Shadow Pointer Test On */ +#define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */ +#define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */ + /* Bits 15..7: same as for RX_GMF_CTRL_T */ +#define GMF_CLI_TX_FU BIT_6 /* Clear IRQ Tx FIFO Underrun */ +#define GMF_CLI_TX_FC BIT_5 /* Clear IRQ Tx Frame Complete */ +#define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */ + /* Bits 3..0: same as for RX_GMF_CTRL_T */ + +#define GMF_RX_CTRL_DEF GMF_OPER_ON +#define GMF_TX_CTRL_DEF GMF_OPER_ON + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ + /* Bit 7.. 3: reserved */ +#define GMT_ST_START BIT_2S /* Start Time Stamp Timer */ +#define GMT_ST_STOP BIT_1S /* Stop Time Stamp Timer */ +#define GMT_ST_CLR_IRQ BIT_0S /* Clear Time Stamp Timer IRQ */ + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ + /* Bits 31.. 8: reserved */ +#define GMC_H_BURST_ON BIT_7 /* Half Duplex Burst Mode On */ +#define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */ +#define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */ +#define GMC_F_LOOPB_OFF BIT_4 /* FIFO Loopback Off */ +#define GMC_PAUSE_ON BIT_3 /* Pause On */ +#define GMC_PAUSE_OFF BIT_2 /* Pause Off */ +#define GMC_RST_CLR BIT_1 /* Clear GMAC Reset */ +#define GMC_RST_SET BIT_0 /* Set GMAC Reset */ + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ + /* Bits 31..29: reserved */ +#define GPC_SEL_BDT BIT_28 /* Select Bi-Dir. 
Transfer for MDC/MDIO */ +#define GPC_INT_POL_HI BIT_27 /* IRQ Polarity is Active HIGH */ +#define GPC_75_OHM BIT_26 /* Use 75 Ohm Termination instead of 50 */ +#define GPC_DIS_FC BIT_25 /* Disable Automatic Fiber/Copper Detection */ +#define GPC_DIS_SLEEP BIT_24 /* Disable Energy Detect */ +#define GPC_HWCFG_M_3 BIT_23 /* HWCFG_MODE[3] */ +#define GPC_HWCFG_M_2 BIT_22 /* HWCFG_MODE[2] */ +#define GPC_HWCFG_M_1 BIT_21 /* HWCFG_MODE[1] */ +#define GPC_HWCFG_M_0 BIT_20 /* HWCFG_MODE[0] */ +#define GPC_ANEG_0 BIT_19 /* ANEG[0] */ +#define GPC_ENA_XC BIT_18 /* Enable MDI crossover */ +#define GPC_DIS_125 BIT_17 /* Disable 125 MHz clock */ +#define GPC_ANEG_3 BIT_16 /* ANEG[3] */ +#define GPC_ANEG_2 BIT_15 /* ANEG[2] */ +#define GPC_ANEG_1 BIT_14 /* ANEG[1] */ +#define GPC_ENA_PAUSE BIT_13 /* Enable Pause (SYM_OR_REM) */ +#define GPC_PHYADDR_4 BIT_12 /* Bit 4 of Phy Addr */ +#define GPC_PHYADDR_3 BIT_11 /* Bit 3 of Phy Addr */ +#define GPC_PHYADDR_2 BIT_10 /* Bit 2 of Phy Addr */ +#define GPC_PHYADDR_1 BIT_9 /* Bit 1 of Phy Addr */ +#define GPC_PHYADDR_0 BIT_8 /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ +#define GPC_RST_CLR BIT_1 /* Clear GPHY Reset */ +#define GPC_RST_SET BIT_0 /* Set GPHY Reset */ + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | \ + GPC_HWCFG_M_1 | GPC_HWCFG_M_0) + +#define GPC_HWCFG_GMII_FIB ( GPC_HWCFG_M_2 | \ + GPC_HWCFG_M_1 | GPC_HWCFG_M_0) + +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | \ + GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +#define GM_IS_TX_CO_OV BIT_5 /* Transmit Counter Overflow IRQ */ +#define GM_IS_RX_CO_OV BIT_4 /* Receive Counter Overflow IRQ */ +#define GM_IS_TX_FF_UR BIT_3 /* Transmit FIFO Underrun */ +#define GM_IS_TX_COMPL BIT_2 /* Frame Transmission Complete */ +#define GM_IS_RX_FF_OR BIT_1 /* Receive FIFO Overrun */ +#define GM_IS_RX_COMPL BIT_0 /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | \ + GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 
2: reserved */ +#define GMLC_RST_CLR BIT_1S /* Clear GMAC Link Reset */ +#define GMLC_RST_SET BIT_0S /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ +#define WOL_CTL_LINK_CHG_OCC BIT_15S +#define WOL_CTL_MAGIC_PKT_OCC BIT_14S +#define WOL_CTL_PATTERN_OCC BIT_13S + +#define WOL_CTL_CLEAR_RESULT BIT_12S + +#define WOL_CTL_ENA_PME_ON_LINK_CHG BIT_11S +#define WOL_CTL_DIS_PME_ON_LINK_CHG BIT_10S +#define WOL_CTL_ENA_PME_ON_MAGIC_PKT BIT_9S +#define WOL_CTL_DIS_PME_ON_MAGIC_PKT BIT_8S +#define WOL_CTL_ENA_PME_ON_PATTERN BIT_7S +#define WOL_CTL_DIS_PME_ON_PATTERN BIT_6S + +#define WOL_CTL_ENA_LINK_CHG_UNIT BIT_5S +#define WOL_CTL_DIS_LINK_CHG_UNIT BIT_4S +#define WOL_CTL_ENA_MAGIC_PKT_UNIT BIT_3S +#define WOL_CTL_DIS_MAGIC_PKT_UNIT BIT_2S +#define WOL_CTL_ENA_PATTERN_UNIT BIT_1S +#define WOL_CTL_DIS_PATTERN_UNIT BIT_0S + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (BIT_0 << (x)) + +#define SK_NUM_WOL_PATTERN 7 +#define SK_PATTERN_PER_WORD 4 +#define SK_BITMASK_PATTERN 7 +#define SK_POW_PATTERN_LENGTH 128 + +#define WOL_LENGTH_MSK 0x7f +#define WOL_LENGTH_SHIFT 8 /* Receive and Transmit Descriptors ******************************************/ @@ -1356,55 +1806,56 @@ /* Receive Descriptor struct */ typedef struct s_HwRxd { SK_U32 volatile RxCtrl; /* Receive Buffer Control Field */ - SK_U32 RxNext; /* Physical Address Pointer to the next TxD */ - SK_U32 RxAdrLo; /* Physical Receive Buffer Address lower dword*/ - SK_U32 RxAdrHi; /* Physical Receive Buffer Address upper dword*/ - SK_U32 RxStat; /* Receive Frame Status Word */ - SK_U32 RxTiSt; /* Receive Timestamp provided by the XMAC */ + SK_U32 RxNext; /* Physical Address Pointer to the next RxD */ + SK_U32 RxAdrLo; /* Physical Rx Buffer Address lower dword */ + SK_U32 RxAdrHi; /* Physical Rx Buffer Address upper dword */ + SK_U32 RxStat; /* Receive Frame Status Word */ + SK_U32 RxTiSt; /* Receive Time Stamp (from XMAC on GENESIS) */ #ifndef SK_USE_REV_DESC - SK_U16 RxTcpSum1; /* TCP Checksum 1 */ - SK_U16 RxTcpSum2; /* TCP Checksum 2 */ - SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ - SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */ + SK_U16 RxTcpSum1; /* TCP Checksum 1 */ + SK_U16 RxTcpSum2; /* TCP Checksum 2 */ + SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ + SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */ #else /* SK_USE_REV_DESC */ - SK_U16 RxTcpSum2; /* TCP Checksum 2 */ - SK_U16 RxTcpSum1; /* TCP Checksum 1 */ - SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */ - SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ + SK_U16 RxTcpSum2; /* TCP Checksum 2 */ + SK_U16 RxTcpSum1; /* TCP Checksum 1 */ + SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */ + SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */ #endif /* SK_USE_REV_DESC */ } SK_HWRXD; /* * Drivers which use the reverse descriptor feature (PCI_OUR_REG_2) * should set the define SK_USE_REV_DESC. - * Structures are 'normaly' not endianess dependent. But in + * Structures are 'normaly' not endianess dependent. But in * this case the SK_U16 fields are bound to bit positions inside the * descriptor. RxTcpSum1 e.g. must start at bit 0 within the 6.th DWord. 
* The bit positions inside a DWord are of course endianess dependent and - * swaps if the DWord is swaped by the hardware. + * swaps if the DWord is swapped by the hardware. */ /* Descriptor Bit Definition */ /* TxCtrl Transmit Buffer Control Field */ -/* RxCtrl Receive Buffer Control Field */ -#define BMU_OWN (1UL<<31) /* Bit 31: OWN bit: 0=host/1=BMU */ -#define BMU_STF (1L<<30) /* Bit 30: Start of Frame ? */ -#define BMU_EOF (1L<<29) /* Bit 29: End of Frame ? */ -#define BMU_IRQ_EOB (1L<<28) /* Bit 28: Req "End of Buff" IRQ */ -#define BMU_IRQ_EOF (1L<<27) /* Bit 27: Req "End of Frame" IRQ*/ +/* RxCtrl Receive Buffer Control Field */ +#define BMU_OWN BIT_31 /* OWN bit: 0=host/1=BMU */ +#define BMU_STF BIT_30 /* Start of Frame */ +#define BMU_EOF BIT_29 /* End of Frame */ +#define BMU_IRQ_EOB BIT_28 /* Req "End of Buffer" IRQ */ +#define BMU_IRQ_EOF BIT_27 /* Req "End of Frame" IRQ */ /* TxCtrl specific bits */ -#define BMU_STFWD (1L<<26) /* Bit 26: (Tx) Store&Forward Frame */ -#define BMU_NO_FCS (1L<<25) /* Bit 25: (Tx) disable XMAC FCS gener*/ -#define BMU_SW (1L<<24) /* Bit 24: (Tx) 1 bit res. for SW use */ +#define BMU_STFWD BIT_26 /* (Tx) Store & Forward Frame */ +#define BMU_NO_FCS BIT_25 /* (Tx) Disable MAC FCS (CRC) generation */ +#define BMU_SW BIT_24 /* (Tx) 1 bit res. for SW use */ /* RxCtrl specific bits */ -#define BMU_DEV_0 (1L<<26) /* Bit 26: (Rx) transfer data to Dev0 */ -#define BMU_STAT_VAL (1L<<25) /* Bit 25: (Rx) RxStat Valid */ -#define BMU_TIST_VAL (1L<<24) /* Bit 24: (Rx) RxTiSt Valid */ - /* Bit 23..16: BMU Check Opcodes */ -#define BMU_CHECK 0x00550000L /* Default BMU check */ -#define BMU_TCP_CHECK 0x00560000L /* Descr with TCP ext */ -#define BMU_BBC 0x0000FFFFL /* Bit 15..0: Buffer Byte Counter */ +#define BMU_DEV_0 BIT_26 /* (Rx) Transfer data to Dev0 */ +#define BMU_STAT_VAL BIT_25 /* (Rx) Rx Status Valid */ +#define BMU_TIST_VAL BIT_24 /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ +#define BMU_CHECK (0x55L<<16) /* Default BMU check */ +#define BMU_TCP_CHECK (0x56L<<16) /* Descr with TCP ext */ +#define BMU_UDP_CHECK (0x57L<<16) /* Descr with UDP ext (YUKON only) */ +#define BMU_BBC 0xFFFFL /* Bit 15.. 0: Buffer Byte Counter */ /* TxStat Transmit Frame Status Word */ /* RxStat Receive Frame Status Word */ @@ -1420,9 +1871,9 @@ /* * FlashProm specification */ -#define MAX_PAGES 0x20000L /* Every byte has a single page */ -#define MAX_FADDR 1 /* 1 byte per page */ -#define SKFDDI_PSZ 8 /* address PROM size */ +#define MAX_PAGES 0x20000L /* Every byte has a single page */ +#define MAX_FADDR 1 /* 1 byte per page */ +#define SKFDDI_PSZ 8 /* address PROM size */ /* macros ********************************************************************/ @@ -1439,59 +1890,67 @@ /* * Macro Q_ADDR() * - * Use this macro to address the Receive and Transmit Queue Registers. + * Use this macro to access the Receive and Transmit Queue Registers. * - * para Queue Queue to address. - * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 + * para: + * Queue Queue to access. + * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 * Offs Queue register offset. - * Values: Q_D, Q_DA_L ... Q_T2, Q_T3 + * Values: Q_D, Q_DA_L ... Q_T2, Q_T3 * - * usage SK_IN32(pAC,Q_ADDR(Q_R2,Q_BC),pVal) + * usage SK_IN32(pAC, Q_ADDR(Q_R2, Q_BC), pVal) */ -#define Q_ADDR(Queue,Offs) (B8_Q_REGS + (Queue) + (Offs)) +#define Q_ADDR(Queue, Offs) (B8_Q_REGS + (Queue) + (Offs)) /* * Macro RB_ADDR() * - * Use this macro to address the RAM Buffer Registers. 
+ * Use this macro to access the RAM Buffer Registers. * - * para Queue Queue to address. - * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 + * para: + * Queue Queue to access. + * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2 * Offs Queue register offset. - * Values: RB_START, RB_END ... RB_LEV, RB_CTRL + * Values: RB_START, RB_END ... RB_LEV, RB_CTRL * - * usage SK_IN32(pAC,RB_ADDR(Q_R2,RB_RP),pVal) + * usage SK_IN32(pAC, RB_ADDR(Q_R2, RB_RP), pVal) */ -#define RB_ADDR(Queue,Offs) (B16_RAM_REGS + (Queue) + (Offs)) +#define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs)) /* * MAC Related Registers */ -#define MAC_1 0 /* belongs to the port near the slot */ +#define MAC_1 0 /* belongs to the port near the slot */ #define MAC_2 1 /* belongs to the port far away from the slot */ /* * Macro MR_ADDR() * - * Use this macro to address a MAC Related Registers in side the ASIC. + * Use this macro to access a MAC Related Registers inside the ASIC. * - * para Queue Queue to address. - * Values: TXA_ITI_INI ... TXA_TEST, - * RX_MFF_EA ... RX_LED_TST, - * LNK_SYNC_INI ... LNK_LED_REG, and - * TX_MFF_EA ... TX_LED_TST - * Mac MAC to address. - * Values: MAC_1, MAC_2 + * para: + * Mac MAC to access. + * Values: MAC_1, MAC_2 + * Offs MAC register offset. + * Values: RX_MFF_EA, RX_MFF_WP ... LNK_LED_REG, + * TX_MFF_EA, TX_MFF_WP ... TX_LED_TST * - * usage SK_IN32(pAC,MR_ADDR(MAC_1,TX_MFF_EA),pVal) + * usage SK_IN32(pAC, MR_ADDR(MAC_1, TX_MFF_EA), pVal) */ -#define MR_ADDR(Mac,Offs) (((Mac) << 7) + (Offs)) +#define MR_ADDR(Mac, Offs) (((Mac) << 7) + (Offs)) +#ifdef SK_LITTLE_ENDIAN +#define XM_WORD_LO 0 +#define XM_WORD_HI 1 +#else /* !SK_LITTLE_ENDIAN */ +#define XM_WORD_LO 1 +#define XM_WORD_HI 0 +#endif /* !SK_LITTLE_ENDIAN */ /* - * macros to access the XMAC + * macros to access the XMAC (GENESIS only) * * XM_IN16(), to read a 16 bit register (e.g. XM_MMU_CMD) * XM_OUT16(), to write a 16 bit register (e.g. XM_MMU_CMD) @@ -1502,43 +1961,37 @@ * XM_INHASH(), to read the XM_HSM_CHK register * XM_OUTHASH() to write the XM_HSM_CHK register * - * para: Mac XMAC to address values: MAC_1 or MAC_2 - * IoC I/O context needed for SK IO macros - * Reg XMAC Register to read or write - * (p)Val Value or pointer to the value which should be read or - * written. 
+ * para: + * Mac XMAC to access values: MAC_1 or MAC_2 + * IoC I/O context needed for SK I/O macros + * Reg XMAC Register to read or write + * (p)Val Value or pointer to the value which should be read or written * * usage: XM_OUT16(IoC, MAC_1, XM_MMU_CMD, Value); */ -#ifdef SK_LITTLE_ENDIAN -#define XM_WORD_LO 0 -#define XM_WORD_HI 1 -#else /* !SK_LITTLE_ENDIAN */ -#define XM_WORD_LO 1 -#define XM_WORD_HI 0 -#endif /* !SK_LITTLE_ENDIAN */ +#define XMA(Mac, Reg) \ + ((BASE_XMAC_1 + (Mac) * (BASE_XMAC_2 - BASE_XMAC_1)) | ((Reg) << 1)) -#define XMA(Mac,Reg) (((0x1000 << (Mac)) + 0x1000) | ((Reg) << 1)) +#define XM_IN16(IoC, Mac, Reg, pVal) \ + SK_IN16((IoC), XMA((Mac), (Reg)), (pVal)) -#define XM_IN16(IoC,Mac,Reg,pVal) SK_IN16((IoC), XMA((Mac), (Reg)), (pVal)) -#define XM_OUT16(IoC,Mac,Reg,Val) SK_OUT16((IoC), XMA((Mac), (Reg)), (Val)) +#define XM_OUT16(IoC, Mac, Reg, Val) \ + SK_OUT16((IoC), XMA((Mac), (Reg)), (Val)) -#define XM_IN32(IoC,Mac,Reg,pVal) { \ +#define XM_IN32(IoC, Mac, Reg, pVal) { \ SK_IN16((IoC), XMA((Mac), (Reg)), \ (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_LO]); \ SK_IN16((IoC), XMA((Mac), (Reg+2)), \ (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_HI]); \ } -#define XM_OUT32(IoC,Mac,Reg,Val) { \ - SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16)((Val) & 0x0000ffffL)); \ - SK_OUT16((IoC), XMA((Mac), (Reg+2)),(SK_U16)(((Val)>>16) & 0x0000ffffL)); \ +#define XM_OUT32(IoC, Mac, Reg, Val) { \ + SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \ + SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16)(((Val) >> 16) & 0xffffL));\ } -/* - * Remember: we are always writing to / reading from LITTLE ENDIAN memory - */ +/* Remember: we are always writing to / reading from LITTLE ENDIAN memory */ #define XM_INADDR(IoC, Mac, Reg, pVal) { \ SK_U16 Word; \ @@ -1605,32 +2058,150 @@ } /* + * macros to access the GMAC (YUKON only) + * + * GM_IN16(), to read a 16 bit register (e.g. GM_GP_STAT) + * GM_OUT16(), to write a 16 bit register (e.g. GM_GP_CTRL) + * GM_IN32(), to read a 32 bit register (e.g. GM_) + * GM_OUT32(), to write a 32 bit register (e.g. GM_) + * GM_INADDR(), to read a network address register (e.g. GM_SRC_ADDR_1L) + * GM_OUTADDR(), to write a network address register (e.g. 
GM_SRC_ADDR_2L) + * GM_INHASH(), to read the GM_MC_ADDR_H1 register + * GM_OUTHASH() to write the GM_MC_ADDR_H1 register + * + * para: + * Mac GMAC to access values: MAC_1 or MAC_2 + * IoC I/O context needed for SK I/O macros + * Reg GMAC Register to read or write + * (p)Val Value or pointer to the value which should be read or written + * + * usage: GM_OUT16(IoC, MAC_1, GM_GP_CTRL, Value); + */ + +#define GMA(Mac, Reg) \ + ((BASE_GMAC_1 + (Mac) * (BASE_GMAC_2 - BASE_GMAC_1)) | (Reg)) + +#define GM_IN16(IoC, Mac, Reg, pVal) \ + SK_IN16((IoC), GMA((Mac), (Reg)), (pVal)) + +#define GM_OUT16(IoC, Mac, Reg, Val) \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (Val)) + +#define GM_IN32(IoC, Mac, Reg, pVal) { \ + SK_IN16((IoC), GMA((Mac), (Reg)), \ + (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_LO]); \ + SK_IN16((IoC), GMA((Mac), (Reg+4)), \ + (SK_U16 *)&((SK_U16 *)(pVal))[XM_WORD_HI]); \ +} + +#define GM_OUT32(IoC, Mac, Reg, Val) { \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \ + SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16)(((Val) >> 16) & 0xffffL));\ +} + +#define GM_INADDR(IoC, Mac, Reg, pVal) { \ + SK_U16 Word; \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \ + pByte[0] = (SK_U8)(Word & 0x00ff); \ + pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \ + pByte[2] = (SK_U8)(Word & 0x00ff); \ + pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \ + pByte[4] = (SK_U8)(Word & 0x00ff); \ + pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \ +} + +#define GM_OUTADDR(IoC, Mac, Reg, pVal) { \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \ + (((SK_U16)(pByte[0]) & 0x00ff) | \ + (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \ + (((SK_U16)(pByte[2]) & 0x00ff) | \ + (((SK_U16)(pByte[3]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \ + (((SK_U16)(pByte[4]) & 0x00ff) | \ + (((SK_U16)(pByte[5]) << 8) & 0xff00))); \ +} + +#define GM_INHASH(IoC, Mac, Reg, pVal) { \ + SK_U16 Word; \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \ + pByte[0] = (SK_U8)(Word & 0x00ff); \ + pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \ + pByte[2] = (SK_U8)(Word & 0x00ff); \ + pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \ + pByte[4] = (SK_U8)(Word & 0x00ff); \ + pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \ + SK_IN16((IoC), GMA((Mac), (Reg+12)), &Word); \ + pByte[6] = (SK_U8)(Word & 0x00ff); \ + pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \ +} + +#define GM_OUTHASH(IoC, Mac, Reg, pVal) { \ + SK_U8 *pByte; \ + pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \ + SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \ + (((SK_U16)(pByte[0]) & 0x00ff)| \ + (((SK_U16)(pByte[1]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \ + (((SK_U16)(pByte[2]) & 0x00ff)| \ + (((SK_U16)(pByte[3]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \ + (((SK_U16)(pByte[4]) & 0x00ff)| \ + (((SK_U16)(pByte[5]) << 8) & 0xff00))); \ + SK_OUT16((IoC), GMA((Mac), (Reg+12)), (SK_U16) \ + (((SK_U16)(pByte[6]) & 0x00ff)| \ + (((SK_U16)(pByte[7]) << 8) & 0xff00))); \ +} + +/* + * Different MAC Types + */ +#define SK_MAC_XMAC 0 /* Xaqti XMAC II */ +#define SK_MAC_GMAC 1 /* Marvell GMAC */ + +/* * Different PHY Types */ -#define 
SK_PHY_XMAC 0 /* integrated in Xmac II*/ -#define SK_PHY_BCOM 1 /* Broadcom BCM5400 */ -#define SK_PHY_LONE 2 /* Level One LXT1000 */ -#define SK_PHY_NAT 3 /* National DP83891 */ +#define SK_PHY_XMAC 0 /* integrated in XMAC II */ +#define SK_PHY_BCOM 1 /* Broadcom BCM5400 */ +#define SK_PHY_LONE 2 /* Level One LXT1000 */ +#define SK_PHY_NAT 3 /* National DP83891 */ +#define SK_PHY_MARV_COPPER 4 /* Marvell 88E1011S */ +#define SK_PHY_MARV_FIBER 5 /* Marvell 88E1011S working on fiber */ /* - * PHY addresses (bits 8..12 of PHY address reg) + * PHY addresses (bits 12..8 of PHY address reg) */ #define PHY_ADDR_XMAC (0<<8) #define PHY_ADDR_BCOM (1<<8) #define PHY_ADDR_LONE (3<<8) #define PHY_ADDR_NAT (0<<8) + +/* GPHY address (bits 15..11 of SMI control reg) */ +#define PHY_ADDR_MARV 0 /* * macros to access the PHY * * PHY_READ() read a 16 bit value from the PHY - * PHY_WIRTE() write a 16 bit value to the PHY + * PHY_WRITE() write a 16 bit value to the PHY * - * para: IoC I/O context needed for SK IO macros - * pPort Pointer to port struct for PhyAddr - * Mac XMAC to address values: MAC_1 or MAC_2 - * PhyReg PHY Register to read or write - * (p)Val Value or pointer to the value which should be read or + * para: + * IoC I/O context needed for SK I/O macros + * pPort Pointer to port struct for PhyAddr + * Mac XMAC to access values: MAC_1 or MAC_2 + * PhyReg PHY Register to read or write + * (p)Val Value or pointer to the value which should be read or * written. * * usage: PHY_READ(IoC, pPort, MAC_1, PHY_CTRL, Value); @@ -1671,7 +2242,7 @@ XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \ } \ } -#endif +#endif /* DEBUG */ #define PHY_WRITE(IoC, pPort, Mac, PhyReg, Val) { \ SK_U16 Mmu; \ @@ -1693,48 +2264,52 @@ /* * Macro PCI_C() * - * Use this macro to address PCI config register from the IO space. + * Use this macro to access PCI config register from the I/O space. * - * para Addr PCI configuration register to address. + * para: + * Addr PCI configuration register to access. * Values: PCI_VENDOR_ID ... PCI_VPD_ADDR, * - * usage SK_IN16(pAC,PCI_C(PCI_VENDOR_ID),pVal); + * usage SK_IN16(pAC, PCI_C(PCI_VENDOR_ID), pVal); */ #define PCI_C(Addr) (B7_CFG_SPC + (Addr)) /* PCI Config Space */ /* - * Macro SK_ADDR(Base,Addr) + * Macro SK_HW_ADDR(Base, Addr) * * Calculates the effective HW address * - * para Base IO- or memory base address + * para: + * Base I/O or memory base address * Addr Address offset * * usage: May be used in SK_INxx and SK_OUTxx macros - * #define SK_IN8(pAC,Addr,pVal) ...\ - * *pVal = (SK_U8) inp(SK_ADDR(pAC->Hw.Iop,Addr))) + * #define SK_IN8(pAC, Addr, pVal) ...\ + * *pVal = (SK_U8)inp(SK_HW_ADDR(pAC->Hw.Iop, Addr))) */ #ifdef SK_MEM_MAPPED_IO -#define SK_HW_ADDR(Base,Addr) ((Base)+(Addr)) +#define SK_HW_ADDR(Base, Addr) ((Base) + (Addr)) #else /* SK_MEM_MAPPED_IO */ -#define SK_HW_ADDR(Base,Addr) ((Base)+(((Addr)&0x7F)|((Addr)>>7 ? 0x80:0))) +#define SK_HW_ADDR(Base, Addr) \ + ((Base) + (((Addr) & 0x7f) | (((Addr) >> 7 > 0) ? 0x80 : 0))) #endif /* SK_MEM_MAPPED_IO */ -#define SZ_LONG (sizeof(SK_U32)) +#define SZ_LONG (sizeof(SK_U32)) /* * Macro SK_HWAC_LINK_LED() * * Use this macro to set the link LED mode. 
- * para pAC Pointer to adapter context struct - * IoC I/O context needed for SK IO macros - * Port Port number + * para: + * pAC Pointer to adapter context struct + * IoC I/O context needed for SK I/O macros + * Port Port number * Mode Mode to set for this LED */ #define SK_HWAC_LINK_LED(pAC, IoC, Port, Mode) \ - SK_OUT8(IoC, MR_ADDR(Port,LNK_LED_REG), Mode); + SK_OUT8(IoC, MR_ADDR(Port, LNK_LED_REG), Mode); + - /* typedefs *******************************************************************/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgehwt.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgehwt.h --- linux.21pre4/drivers/net/sk98lin/h/skgehwt.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgehwt.h 2003-01-06 15:38:18.000000000 +0000 @@ -1,32 +1,23 @@ /****************************************************************************** * * Name: skhwt.h - * Project: Genesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.4 $ - * Date: $Date: 1998/08/19 09:50:58 $ + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.5 $ + * Date: $Date: 1999/11/22 13:54:24 $ * Purpose: Defines for the hardware timer functions * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1989-1998 SysKonnect, + * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. - * All Rights Reserved * - * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF SYSKONNECT - * The copyright notice above does not evidence any - * actual or intended publication of such source code. - * - * This Module contains Proprietary Information of SysKonnect - * and should be treated as Confidential. - * - * The information in this file is provided for the exclusive use of - * the licensees of SysKonnect. - * Such users have the right to use, modify, and incorporate this code - * into products for purposes authorized by the license agreement - * provided they include this notice and the associated copyright notice - * with any such product. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ @@ -36,6 +27,9 @@ * History: * * $Log: skgehwt.h,v $ + * Revision 1.5 1999/11/22 13:54:24 cgoos + * Changed license header to GPL. 
+ * * Revision 1.4 1998/08/19 09:50:58 gklug * fix: remove struct keyword from c-code (see CCC) add typedefs * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgei2c.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgei2c.h --- linux.21pre4/drivers/net/sk98lin/h/skgei2c.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgei2c.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,17 +2,15 @@ * * Name: skgei2c.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.17 $ - * Date: $Date: 1999/11/22 13:55:25 $ - * Purpose: Special genesis defines for I2C - * (taken from Monalisa (taken from Concentrator)) + * Version: $Revision: 1.23 $ + * Date: $Date: 2002/12/19 14:34:27 $ + * Purpose: Special GEnesis defines for TWSI * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,6 +26,28 @@ * History: * * $Log: skgei2c.h,v $ + * Revision 1.23 2002/12/19 14:34:27 rschmidt + * Added cast in macros SK_I2C_SET_BIT() and SK_I2C_CLR_BIT() + * Editorial changes (TWSI) + * + * Revision 1.22 2002/10/14 16:45:56 rschmidt + * Editorial changes (TWSI) + * + * Revision 1.21 2002/08/13 08:42:24 rschmidt + * Changed define for SK_MIN_SENSORS back to 5 + * Merged defines for PHY PLL 3V3 voltage (A and B) + * Editorial changes + * + * Revision 1.20 2002/08/06 09:43:56 jschmalz + * Extensions and changes for Yukon + * + * Revision 1.19 2002/08/02 12:00:08 rschmidt + * Added defines for YUKON sensors + * Editorial changes + * + * Revision 1.18 2001/08/16 12:44:33 afischer + * LM80 sensor init values corrected + * * Revision 1.17 1999/11/22 13:55:25 cgoos * Changed license header to GPL. * @@ -70,7 +90,7 @@ * * Revision 1.5 1998/08/17 06:52:21 malthoff * Remove unrequired macros. - * Add macros for accessing I2C SW register. + * Add macros for accessing TWSI SW register. * * Revision 1.4 1998/08/13 08:30:18 gklug * add: conversion factors for read values @@ -81,8 +101,8 @@ * * Revision 1.2 1998/08/11 07:54:38 gklug * add: sensor states for GE sensors - * add: Macro to access I2c hardware register - * chg: Error messages for I2c errors + * add: Macro to access TWSI hardware register + * chg: Error messages for TWSI errors * * Revision 1.1 1998/07/17 11:27:56 gklug * Created. @@ -92,7 +112,7 @@ ******************************************************************************/ /* - * SKGEI2C.H contains all SK-98xx specific defines for the I2C handling + * SKGEI2C.H contains all SK-98xx specific defines for the TWSI handling */ #ifndef _INC_SKGEI2C_H_ @@ -101,12 +121,12 @@ /* * Macros to access the B2_I2C_CTRL */ -#define SK_I2C_CTL(IoC,flag,dev,reg,burst) \ - SK_OUT32(IoC,B2_I2C_CTRL,\ - (flag ? 0x80000000UL : 0x0L ) | \ +#define SK_I2C_CTL(IoC, flag, dev, reg, burst) \ + SK_OUT32(IoC, B2_I2C_CTRL,\ + (flag ? 
0x80000000UL : 0x0L) | \ (((SK_U32) reg << 16) & I2C_ADDR) | \ (((SK_U32) dev << 9) & I2C_DEV_SEL) | \ - (( burst << 4) & I2C_BURST_LEN) ) + (( burst << 4) & I2C_BURST_LEN)) #define SK_I2C_STOP(IoC) { \ SK_U32 I2cCtrl; \ @@ -114,29 +134,29 @@ SK_OUT32(IoC, B2_I2C_CTRL, I2cCtrl | I2C_STOP); \ } -#define SK_I2C_GET_CTL(Ioc,pI2cCtrl) SK_IN32(Ioc,B2_I2C_CTRL,pI2cCtrl) +#define SK_I2C_GET_CTL(IoC, pI2cCtrl) SK_IN32(IoC, B2_I2C_CTRL, pI2cCtrl) /* - * Macros to access the I2C SW Registers + * Macros to access the TWSI SW Registers */ #define SK_I2C_SET_BIT(IoC, SetBits) { \ SK_U8 OrgBits; \ SK_IN8(IoC, B2_I2C_SW, &OrgBits); \ - SK_OUT8(IoC, B2_I2C_SW, OrgBits | (SetBits)); \ + SK_OUT8(IoC, B2_I2C_SW, OrgBits | (SK_U8)(SetBits)); \ } -#define SK_I2C_CLR_BIT(IoC,ClrBits) { \ +#define SK_I2C_CLR_BIT(IoC, ClrBits) { \ SK_U8 OrgBits; \ SK_IN8(IoC, B2_I2C_SW, &OrgBits); \ - SK_OUT8(IoC, B2_I2C_SW, OrgBits & ~(ClrBits)); \ + SK_OUT8(IoC, B2_I2C_SW, OrgBits & ~((SK_U8)(ClrBits))); \ } -#define SK_I2C_GET_SW(IoC,pI2cSw) SK_IN8(IoC,B2_I2C_SW,pI2cSw) +#define SK_I2C_GET_SW(IoC, pI2cSw) SK_IN8(IoC, B2_I2C_SW, pI2cSw) /* * define the possible sensor states */ -#define SK_SEN_IDLE 0 /* Idle: sensor not read */ +#define SK_SEN_IDLE 0 /* Idle: sensor not read */ #define SK_SEN_VALUE 1 /* Value Read cycle */ #define SK_SEN_VALEXT 2 /* Extended Value Read cycle */ @@ -147,7 +167,7 @@ #define SK_LM80_VT_LSB 22 /* 22mV LSB resolution */ #define SK_LM80_TEMP_LSB 10 /* 1 degree LSB resolution */ #define SK_LM80_TEMPEXT_LSB 5 /* 0.5 degree LSB resolution for the - * extension value + * extension value */ #define SK_LM80_FAN_FAKTOR ((22500L*60)/(1*2)) /* formula: counter = (22500*60)/(rpm * divisor * pulses/2) @@ -163,20 +183,27 @@ #define SK_MIN_SENSORS 5 /* minimal no. of installed sensors */ /* + * To watch the statemachine (JS) use the timer in two ways instead of one as hitherto + */ +#define SK_TIMER_WATCH_STATEMACHINE 0 /* Watch the statemachine to finish in a specific time */ +#define SK_TIMER_NEW_GAUGING 1 /* Start a new gauging when timer expires */ + + +/* * Defines for the individual Thresholds */ /* Temperature sensor */ -#define SK_SEN_ERRHIGH0 800 /* Temperature High Err Threshold */ -#define SK_SEN_WARNHIGH0 700 /* Temperature High Warn Threshold */ -#define SK_SEN_WARNLOW0 100 /* Temperature Low Err Threshold */ -#define SK_SEN_ERRLOW0 0 /* Temperature Low Warn Threshold */ +#define SK_SEN_TEMP_HIGH_ERR 800 /* Temperature High Err Threshold */ +#define SK_SEN_TEMP_HIGH_WARN 700 /* Temperature High Warn Threshold */ +#define SK_SEN_TEMP_LOW_WARN 100 /* Temperature Low Warn Threshold */ +#define SK_SEN_TEMP_LOW_ERR 0 /* Temperature Low Err Threshold */ /* VCC which should be 5 V */ -#define SK_SEN_ERRHIGH1 5588 /* Voltage PCI High Err Threshold */ -#define SK_SEN_WARNHIGH1 5346 /* Voltage PCI High Warn Threshold */ -#define SK_SEN_WARNLOW1 4664 /* Voltage PCI Low Err Threshold */ -#define SK_SEN_ERRLOW1 4422 /* Voltage PCI Low Warn Threshold */ +#define SK_SEN_PCI_5V_HIGH_ERR 5588 /* Voltage PCI High Err Threshold */ +#define SK_SEN_PCI_5V_HIGH_WARN 5346 /* Voltage PCI High Warn Threshold */ +#define SK_SEN_PCI_5V_LOW_WARN 4664 /* Voltage PCI Low Warn Threshold */ +#define SK_SEN_PCI_5V_LOW_ERR 4422 /* Voltage PCI Low Err Threshold */ /* * VIO may be 5 V or 3.3 V. Initialization takes two parts: @@ -187,49 +214,66 @@ * Warning limits are +-5% of the exepected voltage. * Error limits are +-10% of the expected voltage. 
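[Editor's sketch] The comment above describes the two-pass VIO threshold setup: warn at roughly +-5 % and flag an error at roughly +-10 % of the expected rail, with the first reading deciding whether the 3.3 V or the 5 V limit set applies (compare SK_SEN_PCI_IO_RANGE_LIMITER introduced just below; the added 3.3 V set uses somewhat wider margins per its own comments). A rough sketch of the idea follows; it is not taken from the driver, and the actual constants in this header are hand-picked multiples of the LM80's 22 mV LSB, so they do not fall out of this percentage math exactly.

/* Sketch only: the real selection logic lives in the driver's sensor
 * handling, which is not part of this hunk. */
struct sen_thresh {
	long high_err;		/* all values in millivolts */
	long high_warn;
	long low_warn;
	long low_err;
};

static struct sen_thresh vio_thresholds(long first_reading_mv)
{
	/* Below ~4000 mV the rail is treated as 3.3 V, otherwise as 5 V. */
	long nominal = (first_reading_mv < 4000) ? 3300 : 5000;
	struct sen_thresh t;

	t.high_warn = nominal + nominal * 5 / 100;	/* +5 %  warning */
	t.low_warn  = nominal - nominal * 5 / 100;	/* -5 %  warning */
	t.high_err  = nominal + nominal * 10 / 100;	/* +10 % error   */
	t.low_err   = nominal - nominal * 10 / 100;	/* -10 % error   */
	return t;
}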
*/ -#define SK_SEN_ERRHIGH2 5588 /* Voltage PCI-IO High Err Threshold */ -#define SK_SEN_WARNHIGH2 5346 /* Voltage PCI-IO High Warn Threshold */ -#define SK_SEN_WARNLOW2 3146 /* Voltage PCI-IO Low Err Threshold */ -#define SK_SEN_ERRLOW2 2970 /* Voltage PCI-IO Low Warn Threshold */ + +/* Bug fix AF: 16.Aug.2001: Correct the init base of LM80 sensor */ + +#define SK_SEN_PCI_IO_5V_HIGH_ERR 5566 /* + 10% V PCI-IO High Err Threshold */ +#define SK_SEN_PCI_IO_5V_HIGH_WARN 5324 /* + 5% V PCI-IO High Warn Threshold */ + /* 5000 mVolt */ +#define SK_SEN_PCI_IO_5V_LOW_WARN 4686 /* - 5% V PCI-IO Low Warn Threshold */ +#define SK_SEN_PCI_IO_5V_LOW_ERR 4444 /* - 10% V PCI-IO Low Err Threshold */ + +#define SK_SEN_PCI_IO_RANGE_LIMITER 4000 /* 4000 mV range delimiter */ /* correction values for the second pass */ -#define SK_SEN_ERRHIGH2C 3630 /* Voltage PCI-IO High Err Threshold */ -#define SK_SEN_WARNHIGH2C 3476 /* Voltage PCI-IO High Warn Threshold */ -#define SK_SEN_WARNLOW2C 4664 /* Voltage PCI-IO Low Err Threshold */ -#define SK_SEN_ERRLOW2C 4422 /* Voltage PCI-IO Low Warn Threshold */ +#define SK_SEN_PCI_IO_3V3_HIGH_ERR 3850 /* + 15% V PCI-IO High Err Threshold */ +#define SK_SEN_PCI_IO_3V3_HIGH_WARN 3674 /* + 10% V PCI-IO High Warn Threshold */ + /* 3300 mVolt */ +#define SK_SEN_PCI_IO_3V3_LOW_WARN 2926 /* - 10% V PCI-IO Low Warn Threshold */ +#define SK_SEN_PCI_IO_3V3_LOW_ERR 2772 /* - 15% V PCI-IO Low Err Threshold */ + /* * VDD voltage */ -#define SK_SEN_ERRHIGH3 3630 /* Voltage ASIC High Err Threshold */ -#define SK_SEN_WARNHIGH3 3476 /* Voltage ASIC High Warn Threshold */ -#define SK_SEN_WARNLOW3 3146 /* Voltage ASIC Low Err Threshold */ -#define SK_SEN_ERRLOW3 2970 /* Voltage ASIC Low Warn Threshold */ +#define SK_SEN_VDD_HIGH_ERR 3630 /* Voltage ASIC High Err Threshold */ +#define SK_SEN_VDD_HIGH_WARN 3476 /* Voltage ASIC High Warn Threshold */ +#define SK_SEN_VDD_LOW_WARN 3146 /* Voltage ASIC Low Warn Threshold */ +#define SK_SEN_VDD_LOW_ERR 2970 /* Voltage ASIC Low Err Threshold */ + +/* + * PHY PLL 3V3 voltage + */ +#define SK_SEN_PLL_3V3_HIGH_ERR 3630 /* Voltage PMA High Err Threshold */ +#define SK_SEN_PLL_3V3_HIGH_WARN 3476 /* Voltage PMA High Warn Threshold */ +#define SK_SEN_PLL_3V3_LOW_WARN 3146 /* Voltage PMA Low Warn Threshold */ +#define SK_SEN_PLL_3V3_LOW_ERR 2970 /* Voltage PMA Low Err Threshold */ /* - * PLC_3V3 voltage - * PHY_PLL_A_3V3 voltage + * VAUX (YUKON only) */ -#define SK_SEN_ERRHIGH4 3630 /* Voltage PMA High Err Threshold */ -#define SK_SEN_WARNHIGH4 3476 /* Voltage PMA High Warn Threshold */ -#define SK_SEN_WARNLOW4 3146 /* Voltage PMA Low Err Threshold */ -#define SK_SEN_ERRLOW4 2970 /* Voltage PMA Low Warn Threshold */ +#define SK_SEN_VAUX_3V3_HIGH_ERR 3630 /* Voltage VAUX High Err Threshold */ +#define SK_SEN_VAUX_3V3_HIGH_WARN 3476 /* Voltage VAUX High Warn Threshold */ +#define SK_SEN_VAUX_3V3_LOW_WARN 3146 /* Voltage VAUX Low Warn Threshold */ +#define SK_SEN_VAUX_3V3_LOW_ERR 2970 /* Voltage VAUX Low Err Threshold */ +#define SK_SEN_VAUX_0V_WARN_ERR 0 /* if VAUX not present */ +#define SK_SEN_VAUX_RANGE_LIMITER 1000 /* 1000 mV range delimiter */ /* - * PHY_2V5 voltage + * PHY 2V5 voltage */ -#define SK_SEN_ERRHIGH5 2750 /* Voltage PHY High Err Threshold */ -#define SK_SEN_WARNHIGH5 2640 /* Voltage PHY High Warn Threshold */ -#define SK_SEN_WARNLOW5 2376 /* Voltage PHY Low Err Threshold */ -#define SK_SEN_ERRLOW5 2222 /* Voltage PHY Low Warn Threshold */ +#define SK_SEN_PHY_2V5_HIGH_ERR 2750 /* Voltage PHY High Err Threshold */ +#define 
SK_SEN_PHY_2V5_HIGH_WARN 2640 /* Voltage PHY High Warn Threshold */ +#define SK_SEN_PHY_2V5_LOW_WARN 2376 /* Voltage PHY Low Warn Threshold */ +#define SK_SEN_PHY_2V5_LOW_ERR 2222 /* Voltage PHY Low Err Threshold */ /* - * PHY_PLL_B_3V3 voltage + * ASIC Core 1V5 voltage (YUKON only) */ -#define SK_SEN_ERRHIGH6 3630 /* Voltage PMA High Err Threshold */ -#define SK_SEN_WARNHIGH6 3476 /* Voltage PMA High Warn Threshold */ -#define SK_SEN_WARNLOW6 3146 /* Voltage PMA Low Err Threshold */ -#define SK_SEN_ERRLOW6 2970 /* Voltage PMA Low Warn Threshold */ +#define SK_SEN_CORE_1V5_HIGH_ERR 1650 /* Voltage ASIC Core High Err Threshold */ +#define SK_SEN_CORE_1V5_HIGH_WARN 1575 /* Voltage ASIC Core High Warn Threshold */ +#define SK_SEN_CORE_1V5_LOW_WARN 1425 /* Voltage ASIC Core Low Warn Threshold */ +#define SK_SEN_CORE_1V5_LOW_ERR 1350 /* Voltage ASIC Core Low Err Threshold */ /* * FAN 1 speed @@ -239,10 +283,17 @@ * error at: 70 % * no upper limit */ -#define SK_SEN_ERRHIGH 20000 /* FAN Speed High Err Threshold */ -#define SK_SEN_WARNHIGH 20000 /* FAN Speed High Warn Threshold */ -#define SK_SEN_WARNLOW 5200 /* FAN Speed Low Err Threshold */ -#define SK_SEN_ERRLOW 4550 /* FAN Speed Low Warn Threshold */ +#define SK_SEN_FAN_HIGH_ERR 20000 /* FAN Speed High Err Threshold */ +#define SK_SEN_FAN_HIGH_WARN 20000 /* FAN Speed High Warn Threshold */ +#define SK_SEN_FAN_LOW_WARN 5200 /* FAN Speed Low Warn Threshold */ +#define SK_SEN_FAN_LOW_ERR 4550 /* FAN Speed Low Err Threshold */ + +/* + * Some Voltages need dynamic thresholds + */ +#define SK_SEN_DYN_INIT_NONE 0 /* No dynamic init of thresholds */ +#define SK_SEN_DYN_INIT_PCI_IO 10 /* Init PCI-IO with new thresholds */ +#define SK_SEN_DYN_INIT_VAUX 11 /* Init VAUX with new thresholds */ extern int SkLm80ReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen); #endif /* n_INC_SKGEI2C_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgeinit.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgeinit.h --- linux.21pre4/drivers/net/sk98lin/h/skgeinit.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgeinit.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skgeinit.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.51 $ - * Date: $Date: 2001/02/09 12:26:38 $ + * Version: $Revision: 1.73 $ + * Date: $Date: 2002/11/15 12:47:25 $ * Purpose: Structures and prototypes for the GE Init Module * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,115 @@ * History: * * $Log: skgeinit.h,v $ + * Revision 1.73 2002/11/15 12:47:25 rschmidt + * Replaced error message SKERR_HWI_E024 for Cable Diagnostic with + * Rx queue error in SkGeStopPort(). + * + * Revision 1.72 2002/11/12 17:08:35 rschmidt + * Added entries for Cable Diagnostic to Port structure + * Added entries GIPciSlot64 and GIPciClock66 in s_GeInit structure + * Added error message for Cable Diagnostic + * Added prototypes for SkGmCableDiagStatus() + * Editorial changes + * + * Revision 1.71 2002/10/21 11:26:10 mkarl + * Changed interface of SkGeInitAssignRamToQueues(). 
+ * + * Revision 1.70 2002/10/14 08:21:32 rschmidt + * Changed type of GICopperType, GIVauxAvail to SK_BOOL + * Added entry PRxOverCnt to Port structure + * Added entry GIYukon32Bit in s_GeInit structure + * Editorial changes + * + * Revision 1.69 2002/10/09 16:57:15 mkarl + * Added some constants and macros for SkGeInitAssignRamToQueues(). + * + * Revision 1.68 2002/09/12 08:58:51 rwahl + * Retrieve counters needed for XMAC errata workarounds directly because + * PNMI returns corrected counter values (e.g. #10620). + * + * Revision 1.67 2002/08/16 14:40:30 rschmidt + * Added entries GIGenesis and GICopperType in s_GeInit structure + * Added prototypes for SkMacHashing() + * Editorial changes + * + * Revision 1.66 2002/08/12 13:27:21 rschmidt + * Added defines for Link speed capabilities + * Added entry PLinkSpeedCap to Port structure + * Added entry GIVauxAvail in s_GeInit structure + * Added prototypes for SkMacPromiscMode() + * Editorial changes + * + * Revision 1.65 2002/08/08 15:46:18 rschmidt + * Added define SK_PHY_ACC_TO for PHY access timeout + * Added define SK_XM_RX_HI_WM for XMAC Rx High Watermark + * Added define SK_MIN_TXQ_SIZE for Min RAM Buffer Tx Queue Size + * Added entry PhyId1 to Port structure + * + * Revision 1.64 2002/07/23 16:02:56 rschmidt + * Added entry GIWolOffs in s_GeInit struct (HW-Bug in YUKON 1st rev.) + * Added prototypes for: SkGePhyRead(), SkGePhyWrite() + * + * Revision 1.63 2002/07/18 08:17:38 rwahl + * Corrected definitions for SK_LSPEED_xxx & SK_LSPEED_STAT_xxx. + * + * Revision 1.62 2002/07/17 18:21:55 rwahl + * Added SK_LSPEED_INDETERMINATED define. + * + * Revision 1.61 2002/07/17 17:16:03 rwahl + * - MacType now member of GIni struct. + * - Struct alignment to 32bit. + * - Editorial change. + * + * Revision 1.60 2002/07/15 18:23:39 rwahl + * Added GeMacFunc to GE Init structure. + * Added prototypes for SkXmUpdateStats(), SkGmUpdateStats(), + * SkXmMacStatistic(), SkGmMacStatistic(), SkXmResetCounter(), + * SkGmResetCounter(), SkXmOverflowStatus(), SkGmOverflowStatus(). + * Added defines for current link speed state. + * Added ERRMSG defintions for MacUpdateStat() & MacStatistics(). + * + * Revision 1.59 2002/07/15 15:40:22 rschmidt + * Added entry PLinkSpeedUsed to Port structure + * Editorial changes + * + * Revision 1.58 2002/06/10 09:36:30 rschmidt + * Editorial changes. + * + * Revision 1.57 2002/06/05 08:18:00 rschmidt + * Corrected alignment in Port Structure + * Added new prototypes for GMAC + * Editorial changes + * + * Revision 1.56 2002/04/25 11:38:12 rschmidt + * Added defines for Link speed values + * Added defines for Loopback parameters for MAC and PHY + * Removed entry PRxCmd from Port structure + * Added entry PLinkSpeed to Port structure + * Added entries GIChipId and GIChipRev to GE Init structure + * Removed entry GIAnyPortAct from GE Init structure + * Added prototypes for: SkMacInit(), SkMacInitPhy(), + * SkMacRxTxDisable(), SkMacSoftRst(), SkMacHardRst(), SkMacIrq(), + * SkMacIrqDisable(), SkMacFlushTxFifo(), SkMacFlushRxFifo(), + * SkMacAutoNegDone(), SkMacAutoNegLipaPhy(), SkMacSetRxTxEn(), + * SkXmPhyRead(), SkXmPhyRead(), SkGmPhyWrite(), SkGmPhyWrite(); + * Removed prototypes for static functions in SkXmac2.c + * Editorial changes + * + * Revision 1.55 2002/02/26 15:24:53 rwahl + * Fix: no link with manual configuration (#10673). The previous fix for + * #10639 was removed. So for RLMT mode = CLS the RLMT may switch to + * misconfigured port. It should not occur for the other RLMT modes. 
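[Editor's sketch] Revision 1.60 above adds GeMacFunc to the GE init structure; the s_GeMacFunc function-pointer table itself appears further down in this header. The point is that statistics handling is now dispatched per MAC type (XMAC II vs. Marvell GMAC) instead of being hard-wired to the XMAC. A guessed sketch of how such a table is filled and used — the assignment site and the pAC->GIni member name are assumptions, not shown in this patch:

/* Hypothetical init-time assignment (illustration only); the real driver
 * sets these pointers somewhere in its init path, not in this hunk. */
static void assign_mac_funcs(SK_AC *pAC)
{
	if (pAC->GIni.GIMacType == SK_MAC_XMAC) {
		pAC->GIni.GIFunc.pFnMacUpdateStats  = SkXmUpdateStats;
		pAC->GIni.GIFunc.pFnMacStatistic    = SkXmMacStatistic;
		pAC->GIni.GIFunc.pFnMacResetCounter = SkXmResetCounter;
		pAC->GIni.GIFunc.pFnMacOverflow     = SkXmOverflowStatus;
	}
	else {	/* SK_MAC_GMAC */
		pAC->GIni.GIFunc.pFnMacUpdateStats  = SkGmUpdateStats;
		pAC->GIni.GIFunc.pFnMacStatistic    = SkGmMacStatistic;
		pAC->GIni.GIFunc.pFnMacResetCounter = SkGmResetCounter;
		pAC->GIni.GIFunc.pFnMacOverflow     = SkGmOverflowStatus;
	}
}

/* Callers can then stay MAC-agnostic, e.g.:
 *	pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, Port);
 */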
+ * + * Revision 1.54 2002/01/18 16:52:52 rwahl + * Editorial corrections. + * + * Revision 1.53 2001/11/20 09:19:58 rwahl + * Reworked bugfix #10639 (no dependency to RLMT mode). + * + * Revision 1.52 2001/10/26 07:52:23 afischer + * Port switching bug in `check local link` mode + * * Revision 1.51 2001/02/09 12:26:38 cgoos * Inserted #ifdef DIAG for half duplex workaround timer. * @@ -130,7 +239,7 @@ * Add GIRamOffs. * * Revision 1.20 1998/10/19 07:28:37 malthoff - * Add prototyp for SkGeInitRamIface(). + * Add prototype for SkGeInitRamIface(). * * Revision 1.19 1998/10/14 14:47:48 malthoff * SK_TIMER should not be defined for Diagnostics. @@ -156,7 +265,7 @@ * Add some error log messages. * * Revision 1.13 1998/10/06 14:13:14 malthoff - * Add prototyp for SkGeLoadLnkSyncCnt(). + * Add prototype for SkGeLoadLnkSyncCnt(). * * Revision 1.12 1998/10/05 11:29:53 malthoff * bug fix: A comment was not closed. @@ -194,7 +303,7 @@ * Revision 1.4 1998/09/03 09:55:31 malthoff * Add constants for parameters Dir and RstMode * when calling SkGeStopPort(). - * Rework the prototyp section. + * Rework the prototype section. * Add Queue Address offsets PRxQOff, PXsQOff, and PXaQOff. * Remove Ioc with IoC. * @@ -208,7 +317,6 @@ * Revision 1.1 1998/07/23 09:50:24 malthoff * Created. * - * ******************************************************************************/ #ifndef __INC_SKGEINIT_H_ @@ -220,53 +328,49 @@ /* defines ********************************************************************/ -/* - * defines for modifying Link LED behaviour (has been used with SkGeLinkLED()) - */ -#define SK_LNK_OFF LED_OFF -#define SK_LNK_ON (LED_ON | LED_BLK_OFF| LED_SYNC_OFF) -#define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON) -#define SK_LNK_PERM (LED_ON | LED_BLK_OFF| LED_SYNC_ON) -#define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF) - -/* - * defines for parameter 'Mode' when calling SK_HWAC_LINK_LED() - */ -#define SK_LED_OFF LED_OFF -#define SK_LED_ACTIVE (LED_ON | LED_BLK_OFF| LED_SYNC_OFF) -#define SK_LED_STANDBY (LED_ON | LED_BLK_ON| LED_SYNC_OFF) +/* modifying Link LED behaviour (used with SkGeLinkLED()) */ +#define SK_LNK_OFF LED_OFF +#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) +#define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON) +#define SK_LNK_PERM (LED_ON | LED_BLK_OFF | LED_SYNC_ON) +#define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF) + +/* parameter 'Mode' when calling SK_HWAC_LINK_LED() */ +#define SK_LED_OFF LED_OFF +#define SK_LED_ACTIVE (LED_ON | LED_BLK_OFF | LED_SYNC_OFF) +#define SK_LED_STANDBY (LED_ON | LED_BLK_ON | LED_SYNC_OFF) + +/* addressing LED Registers in SkGeXmitLED() */ +#define XMIT_LED_INI 0 +#define XMIT_LED_CNT (RX_LED_VAL - RX_LED_INI) +#define XMIT_LED_CTRL (RX_LED_CTRL- RX_LED_INI) +#define XMIT_LED_TST (RX_LED_TST - RX_LED_INI) -/* - * defines for parameter 'Mode' when calling SkGeXmitLED() - */ +/* parameter 'Mode' when calling SkGeXmitLED() */ #define SK_LED_DIS 0 #define SK_LED_ENA 1 #define SK_LED_TST 2 -/* - * Counter and Timer constants, for a host clock of 62.5 MHz - */ -#define SK_XMIT_DUR 0x002faf08L /* 50 ms */ -#define SK_BLK_DUR 0x01dcd650L /* 500 ms */ +/* Counter and Timer constants, for a host clock of 62.5 MHz */ +#define SK_XMIT_DUR 0x002faf08L /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650L /* 500 ms */ #define SK_DPOLL_DEF 0x00EE6B28L /* 250 ms */ #define SK_DPOLL_MAX 0x00FFFFFFL /* ca. 
268ms */ -#define SK_FACT_62 100 /* is given in percent */ -#define SK_FACT_53 85 +#define SK_FACT_62 100 /* is given in percent */ +#define SK_FACT_53 85 -/* - * Timeout values - */ -#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ #define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ #define SK_PKT_TO_MAX 0xffff /* Maximum value */ -#define SK_RI_TO_53 36 /* RAM interface timeout */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ -/* - * RAM Buffer High Pause Threshold values - */ -#define SK_RB_ULPP ( 8 * 1024) /* Upper Level in kB/8 */ +#define SK_PHY_ACC_TO 600000 /* PHY access timeout */ + +/* RAM Buffer High Pause Threshold values */ +#define SK_RB_ULPP ( 8 * 1024) /* Upper Level in kB/8 */ #define SK_RB_LLPP_S (10 * 1024) /* Lower Level for small Queues */ #define SK_RB_LLPP_B (16 * 1024) /* Lower Level for big Queues */ @@ -274,9 +378,12 @@ #define SK_BMU_RX_WM 0x600 /* BMU Rx Watermark */ #endif #ifndef SK_BMU_TX_WM -#define SK_BMU_TX_WM 0x600 /* BMU Rx Watermark */ +#define SK_BMU_TX_WM 0x600 /* BMU Tx Watermark */ #endif +/* XMAC II Rx High Watermark */ +#define SK_XM_RX_HI_WM 0x05aa /* 1450 */ + /* XMAC II Tx Threshold */ #define SK_XM_THR_REDL 0x01fb /* .. for redundant link usage */ #define SK_XM_THR_SL 0x01fb /* .. for single link adapters */ @@ -284,184 +391,218 @@ #define SK_XM_THR_JUMBO 0x03fc /* .. for jumbo frame usage */ /* values for GIPortUsage */ -#define SK_RED_LINK 1 /* redundant link usage */ -#define SK_MUL_LINK 2 /* multiple link usage */ +#define SK_RED_LINK 1 /* redundant link usage */ +#define SK_MUL_LINK 2 /* multiple link usage */ #define SK_JUMBO_LINK 3 /* driver uses jumbo frames */ -/* Minimum RAM Buffer Receive Queue Size */ -#define SK_MIN_RXQ_SIZE 16 /* 16 kB */ -/* - * defines for parameter 'Dir' when calling SkGeStopPort() - */ -#define SK_STOP_TX 1 /* Stops the transmit path, resets the XMAC */ +/* Minimum RAM Buffer Rx Queue Size */ +#define SK_MIN_RXQ_SIZE 16 /* 16 kB */ + +/* Minimum RAM Buffer Tx Queue Size */ +#define SK_MIN_TXQ_SIZE 16 /* 16 kB */ + +/* Queue Size units */ +#define QZ_UNITS 0x7 +#define QZ_STEP 8 + +/* Percentage of queue size from whole memory */ +/* 80 % for receive */ +#define RAM_QUOTA_RX 80L +/* 0% for sync transfer */ +#define RAM_QUOTA_SYNC 0L +/* the rest (20%) is taken for async transfer */ + +/* Get the rounded queue size in Bytes in 8k steps */ +#define ROUND_QUEUE_SIZE(SizeInBytes) \ + ((((unsigned long) (SizeInBytes) + (QZ_STEP*1024L)-1) / 1024) & \ + ~(QZ_STEP-1)) + +/* Get the rounded queue size in KBytes in 8k steps */ +#define ROUND_QUEUE_SIZE_KB(Kilobytes) \ + ROUND_QUEUE_SIZE((Kilobytes) * 1024L) + +/* Types of RAM Buffer Queues */ +#define SK_RX_SRAM_Q 1 /* small receive queue */ +#define SK_RX_BRAM_Q 2 /* big receive queue */ +#define SK_TX_RAM_Q 3 /* small or big transmit queue */ + +/* parameter 'Dir' when calling SkGeStopPort() */ +#define SK_STOP_TX 1 /* Stops the transmit path, resets the XMAC */ #define SK_STOP_RX 2 /* Stops the receive path */ -#define SK_STOP_ALL 3 /* Stops rx and tx path, resets the XMAC */ +#define SK_STOP_ALL 3 /* Stops Rx and Tx path, resets the XMAC */ -/* - * defines for parameter 'RstMode' when calling SkGeStopPort() - */ +/* parameter 'RstMode' when calling SkGeStopPort() */ #define SK_SOFT_RST 1 /* perform a software reset */ #define SK_HARD_RST 2 /* perform a hardware reset */ -/* - * Define Init Levels - */ -#define SK_INIT_DATA 0 /* Init level 0: init data structures */ -#define 
SK_INIT_IO 1 /* Init level 1: init with IOs */ -#define SK_INIT_RUN 2 /* Init level 2: init for run time */ - -/* - * Set Link Mode Parameter - */ -#define SK_LMODE_HALF 1 /* Half Duplex Mode */ -#define SK_LMODE_FULL 2 /* Full Duplex Mode */ -#define SK_LMODE_AUTOHALF 3 /* AutoHalf Duplex Mode */ -#define SK_LMODE_AUTOFULL 4 /* AutoFull Duplex Mode */ -#define SK_LMODE_AUTOBOTH 5 /* AutoBoth Duplex Mode */ -#define SK_LMODE_AUTOSENSE 6 /* configured mode auto sensing */ +/* Init Levels */ +#define SK_INIT_DATA 0 /* Init level 0: init data structures */ +#define SK_INIT_IO 1 /* Init level 1: init with IOs */ +#define SK_INIT_RUN 2 /* Init level 2: init for run time */ + +/* Link Mode Parameter */ +#define SK_LMODE_HALF 1 /* Half Duplex Mode */ +#define SK_LMODE_FULL 2 /* Full Duplex Mode */ +#define SK_LMODE_AUTOHALF 3 /* AutoHalf Duplex Mode */ +#define SK_LMODE_AUTOFULL 4 /* AutoFull Duplex Mode */ +#define SK_LMODE_AUTOBOTH 5 /* AutoBoth Duplex Mode */ +#define SK_LMODE_AUTOSENSE 6 /* configured mode auto sensing */ #define SK_LMODE_INDETERMINATED 7 /* Return value for virtual port if - * multiple ports are differently - * configured. + * multiple ports are differently configured. + */ + +/* Auto-negotiation timeout in 100ms granularity */ +#define SK_AND_MAX_TO 6 /* Wait 600 msec before link comes up */ + +/* Auto-negotiation error codes */ +#define SK_AND_OK 0 /* no error */ +#define SK_AND_OTHER 1 /* other error than below */ +#define SK_AND_DUP_CAP 2 /* Duplex capabilities error */ + + +/* Link Speed Capabilities */ +#define SK_LSPEED_CAP_AUTO (1<<0) /* Automatic resolution */ +#define SK_LSPEED_CAP_10MBPS (1<<1) /* 10 Mbps */ +#define SK_LSPEED_CAP_100MBPS (1<<2) /* 100 Mbps */ +#define SK_LSPEED_CAP_1000MBPS (1<<3) /* 1000 Mbps */ +#define SK_LSPEED_CAP_INDETERMINATED (1<<4) /* Return value for virtual port if + * multiple ports are differently configured. + */ + +/* Link Speed Parameter */ +#define SK_LSPEED_AUTO 1 /* Automatic resolution */ +#define SK_LSPEED_10MBPS 2 /* 10 Mbps */ +#define SK_LSPEED_100MBPS 3 /* 100 Mbps */ +#define SK_LSPEED_1000MBPS 4 /* 1000 Mbps */ +#define SK_LSPEED_INDETERMINATED 5 /* Return value for virtual port if + * multiple ports are differently configured. */ -/* - * Autonegotiation timeout in 100ms granularity. - */ -#define SK_AND_MAX_TO 6 /* Wait 600 msec before link comes up */ +/* Link Speed Current State */ +#define SK_LSPEED_STAT_UNKNOWN 1 +#define SK_LSPEED_STAT_10MBPS 2 +#define SK_LSPEED_STAT_100MBPS 3 +#define SK_LSPEED_STAT_1000MBPS 4 +#define SK_LSPEED_STAT_INDETERMINATED 5 /* Return value for virtual port if + * multiple ports are differently configured. + */ -/* - * Define Autonegotiation error codes here - */ -#define SK_AND_OK 0 /* no error */ -#define SK_AND_OTHER 1 /* other error than below */ -#define SK_AND_DUP_CAP 2 /* Duplex capabilities error */ -/* - * Link Capability value - */ -#define SK_LMODE_CAP_HALF (1<<0) /* Half Duplex Mode */ -#define SK_LMODE_CAP_FULL (1<<1) /* Full Duplex Mode */ -#define SK_LMODE_CAP_AUTOHALF (1<<2) /* AutoHalf Duplex Mode */ -#define SK_LMODE_CAP_AUTOFULL (1<<3) /* AutoFull Duplex Mode */ +/* Link Capability Parameter */ +#define SK_LMODE_CAP_HALF (1<<0) /* Half Duplex Mode */ +#define SK_LMODE_CAP_FULL (1<<1) /* Full Duplex Mode */ +#define SK_LMODE_CAP_AUTOHALF (1<<2) /* AutoHalf Duplex Mode */ +#define SK_LMODE_CAP_AUTOFULL (1<<3) /* AutoFull Duplex Mode */ #define SK_LMODE_CAP_INDETERMINATED (1<<4) /* Return value for virtual port if - * multiple ports are differently - * configured. 
+ * multiple ports are differently configured. */ -/* - * Link mode current state - */ -#define SK_LMODE_STAT_UNKNOWN 1 /* Unknown Duplex Mode */ -#define SK_LMODE_STAT_HALF 2 /* Half Duplex Mode */ -#define SK_LMODE_STAT_FULL 3 /* Full Duplex Mode */ +/* Link Mode Current State */ +#define SK_LMODE_STAT_UNKNOWN 1 /* Unknown Duplex Mode */ +#define SK_LMODE_STAT_HALF 2 /* Half Duplex Mode */ +#define SK_LMODE_STAT_FULL 3 /* Full Duplex Mode */ #define SK_LMODE_STAT_AUTOHALF 4 /* Half Duplex Mode obtained by AutoNeg */ -#define SK_LMODE_STAT_AUTOFULL 5 /* Half Duplex Mode obtained by AutoNeg */ +#define SK_LMODE_STAT_AUTOFULL 5 /* Full Duplex Mode obtained by AutoNeg */ #define SK_LMODE_STAT_INDETERMINATED 6 /* Return value for virtual port if - * multiple ports are differently - * configured. + * multiple ports are differently configured. */ -/* - * Set Flow Control Mode Parameter (and capabilities) - */ -#define SK_FLOW_MODE_NONE 1 /* No Flow Control */ -#define SK_FLOW_MODE_LOC_SEND 2 /* Local station sends PAUSE */ -#define SK_FLOW_MODE_SYMMETRIC 3 /* Both station may send PAUSE */ -#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both station may send PAUSE or - * just the remote station may send - * PAUSE +/* Flow Control Mode Parameter (and capabilities) */ +#define SK_FLOW_MODE_NONE 1 /* No Flow Control */ +#define SK_FLOW_MODE_LOC_SEND 2 /* Local station sends PAUSE */ +#define SK_FLOW_MODE_SYMMETRIC 3 /* Both station may send PAUSE */ +#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both station may send PAUSE or + * just the remote station may send PAUSE */ #define SK_FLOW_MODE_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently - * configured. + * multiple ports are differently configured. */ -/* - * Flow Control Status Parameter - */ -#define SK_FLOW_STAT_NONE 1 /* No Flow Control */ -#define SK_FLOW_STAT_REM_SEND 2 /* Remote Station sends PAUSE */ -#define SK_FLOW_STAT_LOC_SEND 3 /* Local station sends PAUSE */ -#define SK_FLOW_STAT_SYMMETRIC 4 /* Both station may send PAUSE */ +/* Flow Control Status Parameter */ +#define SK_FLOW_STAT_NONE 1 /* No Flow Control */ +#define SK_FLOW_STAT_REM_SEND 2 /* Remote Station sends PAUSE */ +#define SK_FLOW_STAT_LOC_SEND 3 /* Local station sends PAUSE */ +#define SK_FLOW_STAT_SYMMETRIC 4 /* Both station may send PAUSE */ #define SK_FLOW_STAT_INDETERMINATED 5 /* Return value for virtual port if - * multiple ports are differently - * configured. + * multiple ports are differently configured. */ -/* - * Master/Slave Mode capabilities - */ -#define SK_MS_CAP_AUTO (1<<0) /* Automatic resolution */ -#define SK_MS_CAP_MASTER (1<<1) /* This station is master */ -#define SK_MS_CAP_SLAVE (1<<2) /* This station is slave */ -#define SK_MS_CAP_INDETERMINATED (1<<3) /* Return value for virtual port if - * multiple ports are differently - * configured. +/* Master/Slave Mode Capabilities */ +#define SK_MS_CAP_AUTO (1<<0) /* Automatic resolution */ +#define SK_MS_CAP_MASTER (1<<1) /* This station is master */ +#define SK_MS_CAP_SLAVE (1<<2) /* This station is slave */ +#define SK_MS_CAP_INDETERMINATED (1<<3) /* Return value for virtual port if + * multiple ports are differently configured. 
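[Editor's sketch] Back to the RAM buffer macros earlier in this defines block (QZ_STEP, RAM_QUOTA_RX/RAM_QUOTA_SYNC, ROUND_QUEUE_SIZE): the on-board RAM is split 80 % receive, 0 % synchronous transmit, remainder asynchronous transmit, with queue sizes rounded in 8 kB steps. A small sketch of that arithmetic under those assumptions follows; the real SkGeInitAssignRamToQueues() in skgeinit.c is not part of this hunk and may distribute or clamp the remainder differently.

#define QZ_STEP 8

/* Same rounding as ROUND_QUEUE_SIZE(): bytes in, kB out, rounded up to the
 * next 8 kB step. */
static unsigned long round_queue_size(unsigned long size_in_bytes)
{
	return ((size_in_bytes + QZ_STEP * 1024UL - 1) / 1024) &
		~(unsigned long)(QZ_STEP - 1);
}

/* Illustration of the 80/0/20 split of the adapter RAM (sizes in kB). */
static void split_ram(unsigned long ram_kb, unsigned long *rx_kb,
		      unsigned long *sync_kb, unsigned long *async_kb)
{
	*rx_kb    = round_queue_size(ram_kb * 1024UL * 80 / 100);   /* RAM_QUOTA_RX   */
	*sync_kb  = 0;                                               /* RAM_QUOTA_SYNC */
	*async_kb = (ram_kb > *rx_kb) ? ram_kb - *rx_kb : 0;         /* the rest       */
}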
*/ -/* - * Set Master/Slave Mode Parameter (and capabilities) - */ -#define SK_MS_MODE_AUTO 1 /* Automatic resolution */ -#define SK_MS_MODE_MASTER 2 /* This station is master */ -#define SK_MS_MODE_SLAVE 3 /* This station is slave */ -#define SK_MS_MODE_INDETERMINATED 4 /* Return value for virtual port if +/* Set Master/Slave Mode Parameter (and capabilities) */ +#define SK_MS_MODE_AUTO 1 /* Automatic resolution */ +#define SK_MS_MODE_MASTER 2 /* This station is master */ +#define SK_MS_MODE_SLAVE 3 /* This station is slave */ +#define SK_MS_MODE_INDETERMINATED 4 /* Return value for virtual port if * multiple ports are differently */ -/* - * Master/Slave Status Parameter - */ -#define SK_MS_STAT_UNSET 1 /* The MS status is never been determ*/ -#define SK_MS_STAT_MASTER 2 /* This station is master */ -#define SK_MS_STAT_SLAVE 3 /* This station is slave */ -#define SK_MS_STAT_FAULT 4 /* MS resolution failed */ +/* Master/Slave Status Parameter */ +#define SK_MS_STAT_UNSET 1 /* The MS status is never been determ*/ +#define SK_MS_STAT_MASTER 2 /* This station is master */ +#define SK_MS_STAT_SLAVE 3 /* This station is slave */ +#define SK_MS_STAT_FAULT 4 /* MS resolution failed */ #define SK_MS_STAT_INDETERMINATED 5 /* Return value for virtual port if * multiple ports are differently */ -/* - * defines for parameter 'Mode' when calling SkXmSetRxCmd() - */ -#define SK_STRIP_FCS_ON (1<<0) /* Enable FCS stripping of rx frames */ -#define SK_STRIP_FCS_OFF (1<<1) /* Disable FCS stripping of rx frames */ -#define SK_STRIP_PAD_ON (1<<2) /* Enable pad byte stripping of rx f */ -#define SK_STRIP_PAD_OFF (1<<3) /* Disable pad byte stripping of rx f */ -#define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error*/ -#define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error*/ +/* parameter 'Mode' when calling SkXmSetRxCmd() */ +#define SK_STRIP_FCS_ON (1<<0) /* Enable FCS stripping of Rx frames */ +#define SK_STRIP_FCS_OFF (1<<1) /* Disable FCS stripping of Rx frames */ +#define SK_STRIP_PAD_ON (1<<2) /* Enable pad byte stripping of Rx fr */ +#define SK_STRIP_PAD_OFF (1<<3) /* Disable pad byte stripping of Rx fr */ +#define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error */ +#define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error */ #define SK_BIG_PK_OK_ON (1<<6) /* Don't set rcvError bit for big fr */ #define SK_BIG_PK_OK_OFF (1<<7) /* Set rcvError bit for big frames */ +#define SK_SELF_RX_ON (1<<8) /* Enable Rx of own packets */ +#define SK_SELF_RX_OFF (1<<9) /* Disable Rx of own packets */ -/* - * States of PState - */ +/* parameter 'Para' when calling SkMacSetRxTxEn() */ +#define SK_MAC_LOOPB_ON (1<<0) /* Enable MAC Loopback Mode */ +#define SK_MAC_LOOPB_OFF (1<<1) /* Disable MAC Loopback Mode */ +#define SK_PHY_LOOPB_ON (1<<2) /* Enable PHY Loopback Mode */ +#define SK_PHY_LOOPB_OFF (1<<3) /* Disable PHY Loopback Mode */ +#define SK_PHY_FULLD_ON (1<<4) /* Enable GMII Full Duplex */ +#define SK_PHY_FULLD_OFF (1<<5) /* Disable GMII Full Duplex */ + +/* States of PState */ #define SK_PRT_RESET 0 /* the port is reset */ -#define SK_PRT_STOP 1 /* the port is stopped (similar to sw reset) */ -#define SK_PRT_INIT 2 /* the port is initialized */ -#define SK_PRT_RUN 3 /* the port has an active link */ +#define SK_PRT_STOP 1 /* the port is stopped (similar to SW reset) */ +#define SK_PRT_INIT 2 /* the port is initialized */ +#define SK_PRT_RUN 3 /* the port has an active link */ + +/* Default receive frame limit for Workaround of XMAC Errata */ +#define 
SK_DEF_RX_WA_LIM SK_CONSTU64(100) + +/* Link Partner Status */ +#define SK_LIPA_UNKNOWN 0 /* Link partner is in unknown state */ +#define SK_LIPA_MANUAL 1 /* Link partner is in detected manual state */ +#define SK_LIPA_AUTO 2 /* Link partner is in auto-negotiation state */ -/* - * Default receive frame limit for Workaround of XMAC Errata - */ -#define SK_DEF_RX_WA_LIM SK_CONSTU64(100) +/* Maximum Restarts before restart is ignored (3Com WA) */ +#define SK_MAX_LRESTART 3 /* Max. 3 times the link is restarted */ -/* - * Define link partner Status - */ -#define SK_LIPA_UNKNOWN 0 /* Link partner is in unknown state */ -#define SK_LIPA_MANUAL 1 /* Link partner is in detected manual state */ -#define SK_LIPA_AUTO 2 /* Link partner is in autonegotiation state */ +/* Max. Auto-neg. timeouts before link detection in sense mode is reset */ +#define SK_MAX_ANEG_TO 10 /* Max. 10 times the sense mode is reset */ -/* - * Define Maximum Restarts before restart is ignored (3com WA) - */ -#define SK_MAX_LRESTART 3 /* Max. 3 times the link is restarted */ +/* structures *****************************************************************/ /* - * define max. autonegotiation timeouts before link detection in sense mode is - * reset. + * MAC specific functions */ -#define SK_MAX_ANEG_TO 10 /* Max. 10 times the sense mode is reset */ - -/* structures *****************************************************************/ +typedef struct s_GeMacFunc { + int (*pFnMacUpdateStats)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); + int (*pFnMacStatistic)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, + SK_U16 StatAddr, SK_U32 *pVal); + int (*pFnMacResetCounter)(SK_AC *pAC, SK_IOC IoC, unsigned int Port); + int (*pFnMacOverflow)(SK_AC *pAC, SK_IOC IoC, unsigned int Port, + SK_U16 IStatus, SK_U64 *pVal); +} SK_GEMACFUNC; /* * Port Structure @@ -469,37 +610,35 @@ typedef struct s_GePort { #ifndef SK_DIAG SK_TIMER PWaTimer; /* Workaround Timer */ -#endif - SK_U64 PPrevShorts; /* Previous short Counter checking */ + SK_TIMER HalfDupChkTimer; +#endif /* SK_DIAG */ + SK_U32 PPrevShorts; /* Previous short Counter checking */ + SK_U32 PPrevFcs; /* Previous FCS Error Counter checking */ SK_U64 PPrevRx; /* Previous RxOk Counter checking */ - SK_U64 PPrevFcs; /* Previous FCS Error Counter checking */ SK_U64 PRxLim; /* Previous RxOk Counter checking */ SK_U64 LastOctets; /* For half duplex hang check */ -#ifndef SK_DIAG - SK_TIMER HalfDupChkTimer; -#endif int PLinkResCt; /* Link Restart Counter */ - int PAutoNegTimeOut;/* AutoNegotiation timeout current value */ - int PAutoNegTOCt; /* AutoNeg Timeout Counter */ + int PAutoNegTimeOut;/* Auto-negotiation timeout current value */ + int PAutoNegTOCt; /* Auto-negotiation Timeout Counter */ int PRxQSize; /* Port Rx Queue Size in kB */ - int PXSQSize; /* Port Synchronous Transmit Queue Size in kB */ - int PXAQSize; /* Port Asynchronous Transmit Queue Size in kB*/ + int PXSQSize; /* Port Synchronous Transmit Queue Size in kB */ + int PXAQSize; /* Port Asynchronous Transmit Queue Size in kB */ SK_U32 PRxQRamStart; /* Receive Queue RAM Buffer Start Address */ SK_U32 PRxQRamEnd; /* Receive Queue RAM Buffer End Address */ SK_U32 PXsQRamStart; /* Sync Tx Queue RAM Buffer Start Address */ SK_U32 PXsQRamEnd; /* Sync Tx Queue RAM Buffer End Address */ SK_U32 PXaQRamStart; /* Async Tx Queue RAM Buffer Start Address */ SK_U32 PXaQRamEnd; /* Async Tx Queue RAM Buffer End Address */ + SK_U32 PRxOverCnt; /* Receive Overflow Counter */ int PRxQOff; /* Rx Queue Address Offset */ int PXsQOff; /* Synchronous Tx Queue 
Address Offset */ int PXaQOff; /* Asynchronous Tx Queue Address Offset */ int PhyType; /* PHY used on this port */ + SK_U16 PhyId1; /* PHY Id1 on this port */ SK_U16 PhyAddr; /* MDIO/MDC PHY address */ - SK_U16 PRxCmd; /* Port Receive Command Configuration Value */ SK_U16 PIsave; /* Saved Interrupt status word */ SK_U16 PSsave; /* Saved PHY status word */ - SK_U16 Align01; - SK_BOOL PHWLinkUp; /* The hardware Link is up (wireing) */ + SK_BOOL PHWLinkUp; /* The hardware Link is up (wiring) */ SK_BOOL PState; /* Is port initialized ? */ SK_BOOL PLinkBroken; /* Is Link broken ? */ SK_BOOL PCheckPar; /* Do we check for parity errors ? */ @@ -508,77 +647,102 @@ SK_U8 PLinkModeConf; /* Link Mode configured */ SK_U8 PLinkMode; /* Link Mode currently used */ SK_U8 PLinkModeStatus;/* Link Mode Status */ + SK_U8 PLinkSpeedCap; /* Link Speed Capabilities(10/100/1000 Mbps) */ + SK_U8 PLinkSpeed; /* configured Link Speed (10/100/1000 Mbps) */ + SK_U8 PLinkSpeedUsed; /* current Link Speed (10/100/1000 Mbps) */ SK_U8 PFlowCtrlCap; /* Flow Control Capabilities */ SK_U8 PFlowCtrlMode; /* Flow Control Mode */ SK_U8 PFlowCtrlStatus;/* Flow Control Status */ SK_U8 PMSCap; /* Master/Slave Capabilities */ SK_U8 PMSMode; /* Master/Slave Mode */ SK_U8 PMSStatus; /* Master/Slave Status */ - SK_U8 PAutoNegFail; /* Autonegotiation fail flag */ - SK_U8 PLipaAutoNeg; /* Autonegotiation possible with Link Partner */ - SK_U8 Align02; + SK_U8 PAutoNegFail; /* Auto-negotiation fail flag */ + SK_U8 PLipaAutoNeg; /* Auto-negotiation possible with Link Partner */ + SK_U8 PCableLen; /* Cable Length */ + SK_U8 PMdiPairLen[4]; /* MDI[0..3] Pair Length */ + SK_U8 PMdiPairSts[4]; /* MDI[0..3] Pair Diagnostic Status */ } SK_GEPORT; /* - * Gigabit Ethernet Initalization Struct + * Gigabit Ethernet Initialization Struct * (has to be included in the adapter context) */ typedef struct s_GeInit { + SK_U8 GIPciHwRev; /* PCI HW Revision Number */ + SK_U8 GIChipId; /* Chip Identification Number */ + SK_U8 GIChipRev; /* Chip Revision Number */ + SK_BOOL GIGenesis; /* Genesis adapter ? */ + SK_BOOL GICopperType; /* Copper Type adapter ? 
*/ + SK_BOOL GIPciSlot64; /* 64-bit PCI Slot */ + SK_BOOL GIPciClock66; /* 66 MHz PCI Clock */ + SK_BOOL GIVauxAvail; /* VAUX available (YUKON) */ + SK_BOOL GIYukon32Bit; /* 32-Bit YUKON adapter */ int GIMacsFound; /* Number of MACs found on this adapter */ - int GIPciHwRev; /* PCI HW Revision Number */ - SK_U32 GIRamOffs; /* RAM Address Offset for addr calculation */ - int GIRamSize; /* The RAM size of the adapter in kB */ + int GIMacType; /* MAC Type used on this adapter */ int GIHstClkFact; /* Host Clock Factor (62.5 / HstClk * 100) */ - int GIPortUsage; /* driver port usage: SK_RED_LINK/SK_MUL_LINK */ + int GIPortUsage; /* Driver Port Usage: SK_RED_LINK/SK_MUL_LINK */ + int GILevel; /* Initialization Level completed */ + int GIRamSize; /* The RAM size of the adapter in kB */ + int GIWolOffs; /* WOL Register Offset (HW-Bug in 1st revision) */ + SK_U32 GIRamOffs; /* RAM Address Offset for addr calculation */ SK_U32 GIPollTimerVal; /* Descriptor Poll Timer Init Val in clk ticks*/ - int GILevel; /* Initialization Level Completed */ SK_GEPORT GP[SK_MAX_MACS];/* Port Dependent Information */ - SK_BOOL GIAnyPortAct; /* Is True if one or more port is initialized */ - SK_U8 Align01; - SK_U16 Align02; + SK_GEMACFUNC GIFunc; /* MAC depedent functions */ } SK_GEINIT; /* - * Define the error numbers and messages for xmac_ii.c and skgeinit.c + * Error numbers and messages for skxmac2.c and skgeinit.c */ -#define SKERR_HWI_E001 (SK_ERRBASE_HWINIT) -#define SKERR_HWI_E001MSG "SkXmClrExactAddr() has got illegal parameters" -#define SKERR_HWI_E002 (SKERR_HWI_E001+1) -#define SKERR_HWI_E002MSG "SkGeInit() Level 1 call missing" -#define SKERR_HWI_E003 (SKERR_HWI_E002+1) -#define SKERR_HWI_E003MSG "SkGeInit() called with illegal init Level" -#define SKERR_HWI_E004 (SKERR_HWI_E003+1) -#define SKERR_HWI_E004MSG "SkGeInitPort() Queue size illegal configured" -#define SKERR_HWI_E005 (SKERR_HWI_E004+1) -#define SKERR_HWI_E005MSG "SkGeInitPort() cannot init running ports" -#define SKERR_HWI_E006 (SKERR_HWI_E005+1) -#define SKERR_HWI_E006MSG "SkGeXmInit(): PState does not match HW state" -#define SKERR_HWI_E007 (SKERR_HWI_E006+1) -#define SKERR_HWI_E007MSG "SkXmInitDupMd() called with invalid Dup Mode" -#define SKERR_HWI_E008 (SKERR_HWI_E007+1) -#define SKERR_HWI_E008MSG "SkXmSetRxCmd() called with invalid Mode" -#define SKERR_HWI_E009 (SKERR_HWI_E008+1) -#define SKERR_HWI_E009MSG "SkGeCfgSync() called although PXSQSize zero" -#define SKERR_HWI_E010 (SKERR_HWI_E009+1) -#define SKERR_HWI_E010MSG "SkGeCfgSync() called with invalid parameters" -#define SKERR_HWI_E011 (SKERR_HWI_E010+1) -#define SKERR_HWI_E011MSG "SkGeInitPort() Receive Queue Size to small" -#define SKERR_HWI_E012 (SKERR_HWI_E011+1) -#define SKERR_HWI_E012MSG "SkGeInitPort() invalid Queue Size specified" -#define SKERR_HWI_E013 (SKERR_HWI_E012+1) -#define SKERR_HWI_E013MSG "SkGeInitPort() cfg changed for running queue" -#define SKERR_HWI_E014 (SKERR_HWI_E013+1) -#define SKERR_HWI_E014MSG "SkGeInitPort() unknown GIPortUsage specified" -#define SKERR_HWI_E015 (SKERR_HWI_E014+1) -#define SKERR_HWI_E015MSG "Illegal Link mode parameter" -#define SKERR_HWI_E016 (SKERR_HWI_E015+1) -#define SKERR_HWI_E016MSG "Illegal Flow control mode parameter" -#define SKERR_HWI_E017 (SKERR_HWI_E016+1) -#define SKERR_HWI_E017MSG "Illegal value specified for GIPollTimerVal" -#define SKERR_HWI_E018 (SKERR_HWI_E017+1) -#define SKERR_HWI_E018MSG "FATAL: SkGeStopPort() does not terminate" -#define SKERR_HWI_E019 (SKERR_HWI_E018+1) -#define SKERR_HWI_E019MSG "" +#define 
SKERR_HWI_E001 (SK_ERRBASE_HWINIT) +#define SKERR_HWI_E001MSG "SkXmClrExactAddr() has got illegal parameters" +#define SKERR_HWI_E002 (SKERR_HWI_E001+1) +#define SKERR_HWI_E002MSG "SkGeInit(): Level 1 call missing" +#define SKERR_HWI_E003 (SKERR_HWI_E002+1) +#define SKERR_HWI_E003MSG "SkGeInit() called with illegal init Level" +#define SKERR_HWI_E004 (SKERR_HWI_E003+1) +#define SKERR_HWI_E004MSG "SkGeInitPort(): Queue Size illegal configured" +#define SKERR_HWI_E005 (SKERR_HWI_E004+1) +#define SKERR_HWI_E005MSG "SkGeInitPort(): cannot init running ports" +#define SKERR_HWI_E006 (SKERR_HWI_E005+1) +#define SKERR_HWI_E006MSG "SkGeMacInit(): PState does not match HW state" +#define SKERR_HWI_E007 (SKERR_HWI_E006+1) +#define SKERR_HWI_E007MSG "SkXmInitDupMd() called with invalid Dup Mode" +#define SKERR_HWI_E008 (SKERR_HWI_E007+1) +#define SKERR_HWI_E008MSG "SkXmSetRxCmd() called with invalid Mode" +#define SKERR_HWI_E009 (SKERR_HWI_E008+1) +#define SKERR_HWI_E009MSG "SkGeCfgSync() called although PXSQSize zero" +#define SKERR_HWI_E010 (SKERR_HWI_E009+1) +#define SKERR_HWI_E010MSG "SkGeCfgSync() called with invalid parameters" +#define SKERR_HWI_E011 (SKERR_HWI_E010+1) +#define SKERR_HWI_E011MSG "SkGeInitPort(): Receive Queue Size to small" +#define SKERR_HWI_E012 (SKERR_HWI_E011+1) +#define SKERR_HWI_E012MSG "SkGeInitPort(): invalid Queue Size specified" +#define SKERR_HWI_E013 (SKERR_HWI_E012+1) +#define SKERR_HWI_E013MSG "SkGeInitPort(): cfg changed for running queue" +#define SKERR_HWI_E014 (SKERR_HWI_E013+1) +#define SKERR_HWI_E014MSG "SkGeInitPort(): unknown GIPortUsage specified" +#define SKERR_HWI_E015 (SKERR_HWI_E014+1) +#define SKERR_HWI_E015MSG "Illegal Link mode parameter" +#define SKERR_HWI_E016 (SKERR_HWI_E015+1) +#define SKERR_HWI_E016MSG "Illegal Flow control mode parameter" +#define SKERR_HWI_E017 (SKERR_HWI_E016+1) +#define SKERR_HWI_E017MSG "Illegal value specified for GIPollTimerVal" +#define SKERR_HWI_E018 (SKERR_HWI_E017+1) +#define SKERR_HWI_E018MSG "FATAL: SkGeStopPort() does not terminate (Tx)" +#define SKERR_HWI_E019 (SKERR_HWI_E018+1) +#define SKERR_HWI_E019MSG "Illegal Speed parameter" +#define SKERR_HWI_E020 (SKERR_HWI_E019+1) +#define SKERR_HWI_E020MSG "Illegal Master/Slave parameter" +#define SKERR_HWI_E021 (SKERR_HWI_E020+1) +#define SKERR_HWI_E021MSG "MacUpdateStats(): cannot update statistic counter" +#define SKERR_HWI_E022 (SKERR_HWI_E021+1) +#define SKERR_HWI_E022MSG "MacStatistic(): illegal statistic base address" +#define SKERR_HWI_E023 (SKERR_HWI_E022+1) +#define SKERR_HWI_E023MSG "SkGeInitPort(): Transmit Queue Size to small" +#define SKERR_HWI_E024 (SKERR_HWI_E023+1) +#define SKERR_HWI_E024MSG "FATAL: SkGeStopPort() does not terminate (Rx)" +#define SKERR_HWI_E025 (SKERR_HWI_E024+1) +#define SKERR_HWI_E025MSG "" /* function prototypes ********************************************************/ @@ -588,146 +752,301 @@ * public functions in skgeinit.c */ extern void SkGePollRxD( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_BOOL PollRxD); + SK_BOOL PollRxD); extern void SkGePollTxD( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_BOOL PollTxD); + SK_BOOL PollTxD); extern void SkGeYellowLED( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int State); extern int SkGeCfgSync( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_U32 IntTime, - SK_U32 LimCount, + SK_U32 IntTime, + SK_U32 LimCount, int SyncMode); extern void SkGeLoadLnkSyncCnt( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + 
SK_IOC IoC, int Port, - SK_U32 CntVal); + SK_U32 CntVal); extern void SkGeStopPort( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Port, int Dir, int RstMode); extern int SkGeInit( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Level); extern void SkGeDeInit( - SK_AC *pAC, - SK_IOC IoC); + SK_AC *pAC, + SK_IOC IoC); extern int SkGeInitPort( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Port); extern void SkGeXmitLED( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Led, int Mode); extern void SkGeInitRamIface( - SK_AC *pAC, - SK_IOC IoC); + SK_AC *pAC, + SK_IOC IoC); + +extern int SkGeInitAssignRamToQueues( + SK_AC *pAC, + int ActivePort, + SK_BOOL DualNet); /* * public functions in skxmac2.c */ -extern void SkXmSetRxCmd( - SK_AC *pAC, - SK_IOC IoC, - int Port, - int Mode); +extern void SkMacRxTxDisable( + SK_AC *pAC, + SK_IOC IoC, + int Port); -extern void SkXmClrExactAddr( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacSoftRst( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacHardRst( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkXmInitMac( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkGmInitMac( + SK_AC *pAC, + SK_IOC IoC, + int Port); + +extern void SkMacInitPhy( + SK_AC *pAC, + SK_IOC IoC, int Port, - int StartNum, - int StopNum); + SK_BOOL DoLoop); -extern void SkXmFlushTxFifo( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacIrqDisable( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern void SkXmFlushRxFifo( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacFlushTxFifo( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern void SkXmSoftRst( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacFlushRxFifo( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern void SkXmHardRst( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacIrq( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern void SkXmInitMac( - SK_AC *pAC, - SK_IOC IoC, +extern int SkMacAutoNegDone( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern void SkXmInitDupMd( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacAutoNegLipaPhy( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_U16 IStatus); + +extern void SkMacSetRxTxEn( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Para); + +extern int SkMacRxTxEnable( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern void SkXmInitPauseMd( - SK_AC *pAC, - SK_IOC IoC, +extern void SkMacPromiscMode( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); + +extern void SkMacHashing( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); + +extern void SkXmPhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 *pVal); + +extern void SkXmPhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + +extern void SkGmPhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 *pVal); + +extern void SkGmPhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + +extern void SkGePhyRead( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 *pVal); + +extern void SkGePhyWrite( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int Addr, + SK_U16 Val); + +extern void SkXmClrExactAddr( + SK_AC *pAC, + SK_IOC IoC, + int Port, + int StartNum, + int StopNum); + +extern void SkXmInitDupMd( + SK_AC *pAC, + SK_IOC IoC, int Port); -extern int SkXmAutoNegDone( - SK_AC *pAC, - SK_IOC IoC, +extern void SkXmInitPauseMd( + SK_AC *pAC, + SK_IOC IoC, int Port); extern void SkXmAutoNegLipaXmac( - SK_AC *pAC, - SK_IOC IoC, + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_U16 IStatus); + SK_U16 IStatus); -extern void 
SkXmAutoNegLipaBcom( - SK_AC *pAC, - SK_IOC IoC, +extern int SkXmUpdateStats( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkGmUpdateStats( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkXmMacStatistic( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 StatAddr, + SK_U32 *pVal); + +extern int SkGmMacStatistic( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 StatAddr, + SK_U32 *pVal); + +extern int SkXmResetCounter( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkGmResetCounter( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port); + +extern int SkXmOverflowStatus( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 IStatus, + SK_U64 *pStatus); + +extern int SkGmOverflowStatus( + SK_AC *pAC, + SK_IOC IoC, + unsigned int Port, + SK_U16 MacStatus, + SK_U64 *pStatus); + +extern int SkGmCableDiagStatus( + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_U16 IStatus); + SK_BOOL StartTest); -extern void SkXmAutoNegLipaLone( - SK_AC *pAC, - SK_IOC IoC, +#ifdef SK_DIAG +extern void SkMacSetRxCmd( + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_U16 IStatus); - -extern void SkXmIrq( - SK_AC *pAC, - SK_IOC IoC, + int Mode); +extern void SkMacCrcGener( + SK_AC *pAC, + SK_IOC IoC, int Port, - SK_U16 IStatus); + SK_BOOL Enable); +extern void SkMacTimeStamp( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); +extern void SkXmSendCont( + SK_AC *pAC, + SK_IOC IoC, + int Port, + SK_BOOL Enable); +#endif /* SK_DIAG */ #else /* SK_KR_PROTO */ @@ -745,22 +1064,53 @@ extern int SkGeInitPort(); extern void SkGeXmitLED(); extern void SkGeInitRamIface(); +extern int SkGeInitAssignRamToQueues(); /* * public functions in skxmac2.c */ -extern void SkXmSetRxCmd(); -extern void SkXmClrExactAddr(); -extern void SkXmFlushTxFifo(); -extern void SkXmFlushRxFifo(); -extern void SkXmSoftRst(); -extern void SkXmHardRst(); +extern void SkMacRxTxDisable(); +extern void SkMacSoftRst(); +extern void SkMacHardRst(); +extern void SkMacInitPhy(); +extern int SkMacRxTxEnable(); +extern void SkMacPromiscMode(); +extern void SkMacHashing(); +extern void SkMacIrqDisable(); +extern void SkMacFlushTxFifo(); +extern void SkMacFlushRxFifo(); +extern void SkMacIrq(); +extern int SkMacAutoNegDone(); +extern void SkMacAutoNegLipaPhy(); +extern void SkMacSetRxTxEn(); +extern void SkGePhyRead(); +extern void SkGePhyWrite(); extern void SkXmInitMac(); +extern void SkXmPhyRead(); +extern void SkXmPhyWrite(); +extern void SkGmInitMac(); +extern void SkGmPhyRead(); +extern void SkGmPhyWrite(); +extern void SkXmClrExactAddr(); extern void SkXmInitDupMd(); extern void SkXmInitPauseMd(); -extern int SkXmAutoNegDone(); -extern void SkXmAutoNegLipa(); -extern void SkXmIrq(); +extern void SkXmAutoNegLipaXmac(); +extern int SkXmUpdateStats(); +extern int SkGmUpdateStats(); +extern int SkXmMacStatistic(); +extern int SkGmMacStatistic(); +extern int SkXmResetCounter(); +extern int SkGmResetCounter(); +extern int SkXmOverflowStatus(); +extern int SkGmOverflowStatus(); +extern int SkGmCableDiagStatus(); + +#ifdef SK_DIAG +extern void SkMacSetRxCmd(); +extern void SkMacCrcGener(); +extern void SkMacTimeStamp(); +extern void SkXmSendCont(); +#endif /* SK_DIAG */ #endif /* SK_KR_PROTO */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgepnm2.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgepnm2.h --- linux.21pre4/drivers/net/sk98lin/h/skgepnm2.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgepnm2.h 2003-01-06 
15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skgepnm2.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.30 $ - * Date: $Date: 2001/02/06 10:03:41 $ + * Version: $Revision: 1.34 $ + * Date: $Date: 2002/12/16 09:05:18 $ * Purpose: Defines for Private Network Management Interface * ****************************************************************************/ @@ -26,6 +26,21 @@ * History: * * $Log: skgepnm2.h,v $ + * Revision 1.34 2002/12/16 09:05:18 tschilli + * Code for VCT handling added. + * + * Revision 1.33 2002/09/10 09:00:03 rwahl + * Adapted boolean definitions according sktypes. + * + * Revision 1.32 2002/08/09 09:47:01 rwahl + * Added write-only flag to oid access defines. + * Editorial changes. + * + * Revision 1.31 2002/07/17 19:23:18 rwahl + * - Replaced MAC counter definitions by enumeration. + * - Added definition SK_PNMI_MAC_TYPES. + * - Added chipset defnition for Yukon. + * * Revision 1.30 2001/02/06 10:03:41 mkunz * - Pnmi V4 dual net support added. Interface functions and macros extended * - Vpd bug fixed @@ -140,26 +155,19 @@ #ifndef _SKGEPNM2_H_ #define _SKGEPNM2_H_ -#ifndef FALSE -#define FALSE 0 -#endif - -#ifndef TRUE -#define TRUE !(FALSE) -#endif - /* * General definitions */ -#define SK_PNMI_CHIPSET 1 /* XMAC11800FP */ +#define SK_PNMI_CHIPSET_XMAC 1 /* XMAC11800FP */ +#define SK_PNMI_CHIPSET_YUKON 2 /* YUKON */ #define SK_PNMI_BUS_PCI 1 /* PCI bus*/ /* * Actions */ -#define SK_PNMI_ACT_IDLE 1 -#define SK_PNMI_ACT_RESET 2 +#define SK_PNMI_ACT_IDLE 1 +#define SK_PNMI_ACT_RESET 2 #define SK_PNMI_ACT_SELFTEST 3 #define SK_PNMI_ACT_RESETCNT 4 @@ -170,13 +178,13 @@ #define SK_PNMI_VPD_RW 1 #define SK_PNMI_VPD_RO 2 -#define SK_PNMI_VPD_OK 0 +#define SK_PNMI_VPD_OK 0 #define SK_PNMI_VPD_NOTFOUND 1 -#define SK_PNMI_VPD_CUT 2 -#define SK_PNMI_VPD_TIMEOUT 3 -#define SK_PNMI_VPD_FULL 4 -#define SK_PNMI_VPD_NOWRITE 5 -#define SK_PNMI_VPD_FATAL 6 +#define SK_PNMI_VPD_CUT 2 +#define SK_PNMI_VPD_TIMEOUT 3 +#define SK_PNMI_VPD_FULL 4 +#define SK_PNMI_VPD_NOWRITE 5 +#define SK_PNMI_VPD_FATAL 6 #define SK_PNMI_VPD_IGNORE 0 #define SK_PNMI_VPD_CREATE 1 @@ -188,124 +196,137 @@ */ #define SK_PNMI_DEF_RLMT_CHG_THRES 240 /* 4 changes per minute */ + +/* + * VCT internal status values + */ +#define SK_PNMI_VCT_PENDING 32 +#define SK_PNMI_VCT_TEST_DONE 64 +#define SK_PNMI_VCT_LINK 128 + /* * Internal table definitions */ #define SK_PNMI_GET 0 -#define SK_PNMI_PRESET 1 +#define SK_PNMI_PRESET 1 #define SK_PNMI_SET 2 #define SK_PNMI_RO 0 #define SK_PNMI_RW 1 +#define SK_PNMI_WO 2 typedef struct s_OidTabEntry { - SK_U32 Id; - SK_U32 InstanceNo; + SK_U32 Id; + SK_U32 InstanceNo; unsigned int StructSize; unsigned int Offset; - int Access; - int (* Func)(SK_AC *pAc, SK_IOC pIo, int action, - SK_U32 Id, char* pBuf, unsigned int* pLen, - SK_U32 Instance, unsigned int TableIndex, - SK_U32 NetNumber); - SK_U16 Param; + int Access; + int (* Func)(SK_AC *pAc, SK_IOC pIo, int action, + SK_U32 Id, char* pBuf, unsigned int* pLen, + SK_U32 Instance, unsigned int TableIndex, + SK_U32 NetNumber); + SK_U16 Param; } SK_PNMI_TAB_ENTRY; /* * Trap lengths */ -#define SK_PNMI_TRAP_SIMPLE_LEN 17 +#define SK_PNMI_TRAP_SIMPLE_LEN 17 #define SK_PNMI_TRAP_SENSOR_LEN_BASE 46 #define SK_PNMI_TRAP_RLMT_CHANGE_LEN 23 -#define SK_PNMI_TRAP_RLMT_PORT_LEN 23 - +#define SK_PNMI_TRAP_RLMT_PORT_LEN 23 /* - * MAC statistic data structures - * Only for the first 64 counters: the number relates to the bit in the - * XMAC overflow status register - */ -#define SK_PNMI_HTX 0 -#define 
SK_PNMI_HTX_OCTET 1 -#define SK_PNMI_HTX_OCTETHIGH 1 -#define SK_PNMI_HTX_OCTETLOW 2 -#define SK_PNMI_HTX_BROADCAST 3 -#define SK_PNMI_HTX_MULTICAST 4 -#define SK_PNMI_HTX_UNICAST 5 -#define SK_PNMI_HTX_LONGFRAMES 6 -#define SK_PNMI_HTX_BURST 7 -#define SK_PNMI_HTX_PMACC 8 -#define SK_PNMI_HTX_MACC 9 -#define SK_PNMI_HTX_SINGLE_COL 10 -#define SK_PNMI_HTX_MULTI_COL 11 -#define SK_PNMI_HTX_EXCESS_COL 12 -#define SK_PNMI_HTX_LATE_COL 13 -#define SK_PNMI_HTX_DEFFERAL 14 -#define SK_PNMI_HTX_EXCESS_DEF 15 -#define SK_PNMI_HTX_UNDERRUN 16 -#define SK_PNMI_HTX_CARRIER 17 -#define SK_PNMI_HTX_UTILUNDER 18 -#define SK_PNMI_HTX_UTILOVER 19 -#define SK_PNMI_HTX_64 20 -#define SK_PNMI_HTX_127 21 -#define SK_PNMI_HTX_255 22 -#define SK_PNMI_HTX_511 23 -#define SK_PNMI_HTX_1023 24 -#define SK_PNMI_HTX_MAX 25 -#define SK_PNMI_HTX_RESERVED26 26 -#define SK_PNMI_HTX_RESERVED27 27 -#define SK_PNMI_HTX_RESERVED28 28 -#define SK_PNMI_HTX_RESERVED29 29 -#define SK_PNMI_HTX_RESERVED30 30 -#define SK_PNMI_HTX_RESERVED31 31 -#define SK_PNMI_HRX (32 + 0) -#define SK_PNMI_HRX_OCTET (32 + 1) -#define SK_PNMI_HRX_OCTETHIGH (32 + 1) -#define SK_PNMI_HRX_OCTETLOW (32 + 2) -#define SK_PNMI_HRX_BROADCAST (32 + 3) -#define SK_PNMI_HRX_MULTICAST (32 + 4) -#define SK_PNMI_HRX_UNICAST (32 + 5) -#define SK_PNMI_HRX_PMACC (32 + 6) -#define SK_PNMI_HRX_MACC (32 + 7) -#define SK_PNMI_HRX_PMACC_ERR (32 + 8) -#define SK_PNMI_HRX_MACC_UNKWN (32 + 9) -#define SK_PNMI_HRX_BURST (32 + 10) -#define SK_PNMI_HRX_MISSED (32 + 11) -#define SK_PNMI_HRX_FRAMING (32 + 12) -#define SK_PNMI_HRX_OVERFLOW (32 + 13) -#define SK_PNMI_HRX_JABBER (32 + 14) -#define SK_PNMI_HRX_CARRIER (32 + 15) -#define SK_PNMI_HRX_IRLENGTH (32 + 16) -#define SK_PNMI_HRX_SYMBOL (32 + 17) -#define SK_PNMI_HRX_SHORTS (32 + 18) -#define SK_PNMI_HRX_RUNT (32 + 19) -#define SK_PNMI_HRX_TOO_LONG (32 + 20) -#define SK_PNMI_HRX_FCS (32 + 21) -#define SK_PNMI_HRX_RESERVED22 (32 + 22) -#define SK_PNMI_HRX_CEXT (32 + 23) -#define SK_PNMI_HRX_UTILUNDER (32 + 24) -#define SK_PNMI_HRX_UTILOVER (32 + 25) -#define SK_PNMI_HRX_64 (32 + 26) -#define SK_PNMI_HRX_127 (32 + 27) -#define SK_PNMI_HRX_255 (32 + 28) -#define SK_PNMI_HRX_511 (32 + 29) -#define SK_PNMI_HRX_1023 (32 + 30) -#define SK_PNMI_HRX_MAX (32 + 31) - -#define SK_PNMI_HTX_SYNC 64 -#define SK_PNMI_HTX_SYNC_OCTET 65 - -#define SK_PNMI_HRX_LONGFRAMES 66 + * Number of MAC types supported + */ +#define SK_PNMI_MAC_TYPES (SK_MAC_GMAC + 1) -#define SK_PNMI_MAX_IDX (SK_PNMI_CNT_NO) +/* + * MAC statistic data list (overall set for MAC types used) + */ +enum SK_MACSTATS { + SK_PNMI_HTX = 0, + SK_PNMI_HTX_OCTET, + SK_PNMI_HTX_OCTETHIGH = SK_PNMI_HTX_OCTET, + SK_PNMI_HTX_OCTETLOW, + SK_PNMI_HTX_BROADCAST, + SK_PNMI_HTX_MULTICAST, + SK_PNMI_HTX_UNICAST, + SK_PNMI_HTX_BURST, + SK_PNMI_HTX_PMACC, + SK_PNMI_HTX_MACC, + SK_PNMI_HTX_COL, + SK_PNMI_HTX_SINGLE_COL, + SK_PNMI_HTX_MULTI_COL, + SK_PNMI_HTX_EXCESS_COL, + SK_PNMI_HTX_LATE_COL, + SK_PNMI_HTX_DEFFERAL, + SK_PNMI_HTX_EXCESS_DEF, + SK_PNMI_HTX_UNDERRUN, + SK_PNMI_HTX_CARRIER, + SK_PNMI_HTX_UTILUNDER, + SK_PNMI_HTX_UTILOVER, + SK_PNMI_HTX_64, + SK_PNMI_HTX_127, + SK_PNMI_HTX_255, + SK_PNMI_HTX_511, + SK_PNMI_HTX_1023, + SK_PNMI_HTX_MAX, + SK_PNMI_HTX_LONGFRAMES, + SK_PNMI_HTX_SYNC, + SK_PNMI_HTX_SYNC_OCTET, + SK_PNMI_HTX_RESERVED, + + SK_PNMI_HRX, + SK_PNMI_HRX_OCTET, + SK_PNMI_HRX_OCTETHIGH = SK_PNMI_HRX_OCTET, + SK_PNMI_HRX_OCTETLOW, + SK_PNMI_HRX_BADOCTET, + SK_PNMI_HRX_BADOCTETHIGH = SK_PNMI_HRX_BADOCTET, + SK_PNMI_HRX_BADOCTETLOW, + SK_PNMI_HRX_BROADCAST, + 
SK_PNMI_HRX_MULTICAST, + SK_PNMI_HRX_UNICAST, + SK_PNMI_HRX_PMACC, + SK_PNMI_HRX_MACC, + SK_PNMI_HRX_PMACC_ERR, + SK_PNMI_HRX_MACC_UNKWN, + SK_PNMI_HRX_BURST, + SK_PNMI_HRX_MISSED, + SK_PNMI_HRX_FRAMING, + SK_PNMI_HRX_UNDERSIZE, + SK_PNMI_HRX_OVERFLOW, + SK_PNMI_HRX_JABBER, + SK_PNMI_HRX_CARRIER, + SK_PNMI_HRX_IRLENGTH, + SK_PNMI_HRX_SYMBOL, + SK_PNMI_HRX_SHORTS, + SK_PNMI_HRX_RUNT, + SK_PNMI_HRX_TOO_LONG, + SK_PNMI_HRX_FCS, + SK_PNMI_HRX_CEXT, + SK_PNMI_HRX_UTILUNDER, + SK_PNMI_HRX_UTILOVER, + SK_PNMI_HRX_64, + SK_PNMI_HRX_127, + SK_PNMI_HRX_255, + SK_PNMI_HRX_511, + SK_PNMI_HRX_1023, + SK_PNMI_HRX_MAX, + SK_PNMI_HRX_LONGFRAMES, + + SK_PNMI_HRX_RESERVED, + + SK_PNMI_MAX_IDX /* NOTE: Ensure SK_PNMI_CNT_NO is set to this value */ +}; /* * MAC specific data */ typedef struct s_PnmiStatAddr { - SK_BOOL GetOffset; /* TRUE: Call GetStatVal function */ - SK_U16 Param; /* XMAC register containing value */ + SK_U16 Reg; /* MAC register containing the value */ + SK_BOOL GetOffset; /* TRUE: Offset managed by PNMI (call GetStatVal())*/ } SK_PNMI_STATADDR; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgepnmi.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgepnmi.h --- linux.21pre4/drivers/net/sk98lin/h/skgepnmi.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgepnmi.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skgepnmi.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.48 $ - * Date: $Date: 2001/02/23 14:34:24 $ + * Version: $Revision: 1.59 $ + * Date: $Date: 2002/12/16 14:03:50 $ * Purpose: Defines for Private Network Management Interface * ****************************************************************************/ @@ -26,6 +26,43 @@ * History: * * $Log: skgepnmi.h,v $ + * Revision 1.59 2002/12/16 14:03:50 tschilli + * New defines for VCT added. + * + * Revision 1.58 2002/12/16 09:04:59 tschilli + * Code for VCT handling added. + * + * Revision 1.57 2002/09/26 12:41:05 tschilli + * SK_PNMI_PORT BufPort entry in struct SK_PNMI added. + * + * Revision 1.56 2002/08/16 11:10:41 rwahl + * - Replaced c++ comment. + * + * Revision 1.55 2002/08/09 15:40:21 rwahl + * Editorial change (renamed ConfSpeedCap). + * + * Revision 1.54 2002/08/09 11:06:07 rwahl + * Added OID_SKGE_SPEED_CAP. + * + * Revision 1.53 2002/08/09 09:45:28 rwahl + * Added support for NDIS OID_PNP_xxx. + * Editorial changes. + * + * Revision 1.52 2002/08/06 17:54:07 rwahl + * - Added speed cap to PNMI config struct. + * + * Revision 1.51 2002/07/17 19:19:26 rwahl + * - Added OID_SKGE_SPEED_MODE and OID_SKGE_SPEED_STATUS. + * - Added SK_PNMI_CNT_RX_PMACC_ERR() & SK_PNMI_CNT_RX_LONGFRAMES(). + * - Added speed mode & status to PNMI config struct. + * - Editorial changes. + * + * Revision 1.50 2002/05/22 08:59:37 rwahl + * Added string definitions for error msgs. + * + * Revision 1.49 2001/11/20 09:23:50 rwahl + * - pnmi struct: reordered and aligned to 32bit. + * * Revision 1.48 2001/02/23 14:34:24 mkunz * Changed macro PHYS2INST. Added pAC to Interface * @@ -233,12 +270,14 @@ #define SK_PNMI_EVT_RLMT_ACTIVE_UP 15 /* Port came logically up */ #define SK_PNMI_EVT_RLMT_SET_NETS 16 /* 1. Parameter is number of nets 1 = single net; 2 = dual net */ +#define SK_PNMI_EVT_VCT_RESET 17 /* VCT port reset timer event started with SET. 
*/ + /* * Return values */ -#define SK_PNMI_ERR_OK 0 -#define SK_PNMI_ERR_GENERAL 1 +#define SK_PNMI_ERR_OK 0 +#define SK_PNMI_ERR_GENERAL 1 #define SK_PNMI_ERR_TOO_SHORT 2 #define SK_PNMI_ERR_BAD_VALUE 3 #define SK_PNMI_ERR_READ_ONLY 4 @@ -290,11 +329,11 @@ */ #ifndef _NDIS_ /* Check, whether NDIS already included OIDs */ -#define OID_GEN_XMIT_OK 0x00020101 -#define OID_GEN_RCV_OK 0x00020102 -#define OID_GEN_XMIT_ERROR 0x00020103 -#define OID_GEN_RCV_ERROR 0x00020104 -#define OID_GEN_RCV_NO_BUFFER 0x00020105 +#define OID_GEN_XMIT_OK 0x00020101 +#define OID_GEN_RCV_OK 0x00020102 +#define OID_GEN_XMIT_ERROR 0x00020103 +#define OID_GEN_RCV_ERROR 0x00020104 +#define OID_GEN_RCV_NO_BUFFER 0x00020105 /* #define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 */ #define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202 @@ -303,211 +342,256 @@ /* #define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 */ #define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206 /* #define OID_GEN_DIRECTED_BYTES_RCV 0x00020207 */ -#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 +#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208 /* #define OID_GEN_MULTICAST_BYTES_RCV 0x00020209 */ #define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A /* #define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B */ #define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C -#define OID_GEN_RCV_CRC_ERROR 0x0002020D +#define OID_GEN_RCV_CRC_ERROR 0x0002020D #define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E -#define OID_802_3_PERMANENT_ADDRESS 0x01010101 -#define OID_802_3_CURRENT_ADDRESS 0x01010102 -/* #define OID_802_3_MULTICAST_LIST 0x01010103 */ +#define OID_802_3_PERMANENT_ADDRESS 0x01010101 +#define OID_802_3_CURRENT_ADDRESS 0x01010102 +/* #define OID_802_3_MULTICAST_LIST 0x01010103 */ /* #define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 */ -/* #define OID_802_3_MAC_OPTIONS 0x01010105 */ +/* #define OID_802_3_MAC_OPTIONS 0x01010105 */ #define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101 #define OID_802_3_XMIT_ONE_COLLISION 0x01020102 #define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103 -#define OID_802_3_XMIT_DEFERRED 0x01020201 +#define OID_802_3_XMIT_DEFERRED 0x01020201 #define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202 -#define OID_802_3_RCV_OVERRUN 0x01020203 -#define OID_802_3_XMIT_UNDERRUN 0x01020204 +#define OID_802_3_RCV_OVERRUN 0x01020203 +#define OID_802_3_XMIT_UNDERRUN 0x01020204 #define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206 #define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207 +/* + * PnP and PM OIDs + */ +#ifdef SK_POWER_MGMT +#define OID_PNP_CAPABILITIES 0xFD010100 +#define OID_PNP_SET_POWER 0xFD010101 +#define OID_PNP_QUERY_POWER 0xFD010102 +#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103 +#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104 +#define OID_PNP_ENABLE_WAKE_UP 0xFD010106 +#endif /* SK_POWER_MGMT */ + #endif /* _NDIS_ */ -#define OID_SKGE_MDB_VERSION 0xFF010100 -#define OID_SKGE_SUPPORTED_LIST 0xFF010101 -#define OID_SKGE_VPD_FREE_BYTES 0xFF010102 -#define OID_SKGE_VPD_ENTRIES_LIST 0xFF010103 -#define OID_SKGE_VPD_ENTRIES_NUMBER 0xFF010104 -#define OID_SKGE_VPD_KEY 0xFF010105 -#define OID_SKGE_VPD_VALUE 0xFF010106 -#define OID_SKGE_VPD_ACCESS 0xFF010107 -#define OID_SKGE_VPD_ACTION 0xFF010108 +#define OID_SKGE_MDB_VERSION 0xFF010100 +#define OID_SKGE_SUPPORTED_LIST 0xFF010101 +#define OID_SKGE_VPD_FREE_BYTES 0xFF010102 +#define OID_SKGE_VPD_ENTRIES_LIST 0xFF010103 +#define OID_SKGE_VPD_ENTRIES_NUMBER 0xFF010104 +#define OID_SKGE_VPD_KEY 0xFF010105 +#define OID_SKGE_VPD_VALUE 0xFF010106 +#define OID_SKGE_VPD_ACCESS 0xFF010107 +#define OID_SKGE_VPD_ACTION 0xFF010108 -#define 
OID_SKGE_PORT_NUMBER 0xFF010110 -#define OID_SKGE_DEVICE_TYPE 0xFF010111 -#define OID_SKGE_DRIVER_DESCR 0xFF010112 -#define OID_SKGE_DRIVER_VERSION 0xFF010113 -#define OID_SKGE_HW_DESCR 0xFF010114 -#define OID_SKGE_HW_VERSION 0xFF010115 -#define OID_SKGE_CHIPSET 0xFF010116 -#define OID_SKGE_ACTION 0xFF010117 -#define OID_SKGE_RESULT 0xFF010118 -#define OID_SKGE_BUS_TYPE 0xFF010119 -#define OID_SKGE_BUS_SPEED 0xFF01011A -#define OID_SKGE_BUS_WIDTH 0xFF01011B - -/*#define OID_SKGE_MULTICAST_LIST 0xFF01011C*/ - -#define OID_SKGE_SENSOR_NUMBER 0xFF020100 -#define OID_SKGE_SENSOR_INDEX 0xFF020101 -#define OID_SKGE_SENSOR_DESCR 0xFF020102 -#define OID_SKGE_SENSOR_TYPE 0xFF020103 -#define OID_SKGE_SENSOR_VALUE 0xFF020104 +#define OID_SKGE_PORT_NUMBER 0xFF010110 +#define OID_SKGE_DEVICE_TYPE 0xFF010111 +#define OID_SKGE_DRIVER_DESCR 0xFF010112 +#define OID_SKGE_DRIVER_VERSION 0xFF010113 +#define OID_SKGE_HW_DESCR 0xFF010114 +#define OID_SKGE_HW_VERSION 0xFF010115 +#define OID_SKGE_CHIPSET 0xFF010116 +#define OID_SKGE_ACTION 0xFF010117 +#define OID_SKGE_RESULT 0xFF010118 +#define OID_SKGE_BUS_TYPE 0xFF010119 +#define OID_SKGE_BUS_SPEED 0xFF01011A +#define OID_SKGE_BUS_WIDTH 0xFF01011B +/* 0xFF01011C unused */ +#define OID_SKGE_DIAG_ACTION 0xFF01011D +#define OID_SKGE_DIAG_RESULT 0xFF01011E +#define OID_SKGE_MTU 0xFF01011F +#define OID_SKGE_PHYS_CUR_ADDR 0xFF010120 +#define OID_SKGE_PHYS_FAC_ADDR 0xFF010121 +#define OID_SKGE_PMD 0xFF010122 +#define OID_SKGE_CONNECTOR 0xFF010123 +#define OID_SKGE_LINK_CAP 0xFF010124 +#define OID_SKGE_LINK_MODE 0xFF010125 +#define OID_SKGE_LINK_MODE_STATUS 0xFF010126 +#define OID_SKGE_LINK_STATUS 0xFF010127 +#define OID_SKGE_FLOWCTRL_CAP 0xFF010128 +#define OID_SKGE_FLOWCTRL_MODE 0xFF010129 +#define OID_SKGE_FLOWCTRL_STATUS 0xFF01012A +#define OID_SKGE_PHY_OPERATION_CAP 0xFF01012B +#define OID_SKGE_PHY_OPERATION_MODE 0xFF01012C +#define OID_SKGE_PHY_OPERATION_STATUS 0xFF01012D +#define OID_SKGE_MULTICAST_LIST 0xFF01012E +#define OID_SKGE_CURRENT_PACKET_FILTER 0xFF01012F + +#define OID_SKGE_TRAP 0xFF010130 +#define OID_SKGE_TRAP_NUMBER 0xFF010131 + +#define OID_SKGE_RLMT_MODE 0xFF010140 +#define OID_SKGE_RLMT_PORT_NUMBER 0xFF010141 +#define OID_SKGE_RLMT_PORT_ACTIVE 0xFF010142 +#define OID_SKGE_RLMT_PORT_PREFERRED 0xFF010143 +#define OID_SKGE_INTERMEDIATE_SUPPORT 0xFF010160 + +#define OID_SKGE_SPEED_CAP 0xFF010170 +#define OID_SKGE_SPEED_MODE 0xFF010171 +#define OID_SKGE_SPEED_STATUS 0xFF010172 + +#define OID_SKGE_SENSOR_NUMBER 0xFF020100 +#define OID_SKGE_SENSOR_INDEX 0xFF020101 +#define OID_SKGE_SENSOR_DESCR 0xFF020102 +#define OID_SKGE_SENSOR_TYPE 0xFF020103 +#define OID_SKGE_SENSOR_VALUE 0xFF020104 #define OID_SKGE_SENSOR_WAR_THRES_LOW 0xFF020105 #define OID_SKGE_SENSOR_WAR_THRES_UPP 0xFF020106 #define OID_SKGE_SENSOR_ERR_THRES_LOW 0xFF020107 #define OID_SKGE_SENSOR_ERR_THRES_UPP 0xFF020108 -#define OID_SKGE_SENSOR_STATUS 0xFF020109 -#define OID_SKGE_SENSOR_WAR_CTS 0xFF02010A -#define OID_SKGE_SENSOR_ERR_CTS 0xFF02010B -#define OID_SKGE_SENSOR_WAR_TIME 0xFF02010C -#define OID_SKGE_SENSOR_ERR_TIME 0xFF02010D +#define OID_SKGE_SENSOR_STATUS 0xFF020109 +#define OID_SKGE_SENSOR_WAR_CTS 0xFF02010A +#define OID_SKGE_SENSOR_ERR_CTS 0xFF02010B +#define OID_SKGE_SENSOR_WAR_TIME 0xFF02010C +#define OID_SKGE_SENSOR_ERR_TIME 0xFF02010D -#define OID_SKGE_CHKSM_NUMBER 0xFF020110 -#define OID_SKGE_CHKSM_RX_OK_CTS 0xFF020111 +#define OID_SKGE_CHKSM_NUMBER 0xFF020110 +#define OID_SKGE_CHKSM_RX_OK_CTS 0xFF020111 #define OID_SKGE_CHKSM_RX_UNABLE_CTS 0xFF020112 -#define 
OID_SKGE_CHKSM_RX_ERR_CTS 0xFF020113 -#define OID_SKGE_CHKSM_TX_OK_CTS 0xFF020114 +#define OID_SKGE_CHKSM_RX_ERR_CTS 0xFF020113 +#define OID_SKGE_CHKSM_TX_OK_CTS 0xFF020114 #define OID_SKGE_CHKSM_TX_UNABLE_CTS 0xFF020115 -#define OID_SKGE_STAT_TX 0xFF020120 -#define OID_SKGE_STAT_TX_OCTETS 0xFF020121 -#define OID_SKGE_STAT_TX_BROADCAST 0xFF020122 -#define OID_SKGE_STAT_TX_MULTICAST 0xFF020123 -#define OID_SKGE_STAT_TX_UNICAST 0xFF020124 -#define OID_SKGE_STAT_TX_LONGFRAMES 0xFF020125 -#define OID_SKGE_STAT_TX_BURST 0xFF020126 -#define OID_SKGE_STAT_TX_PFLOWC 0xFF020127 -#define OID_SKGE_STAT_TX_FLOWC 0xFF020128 -#define OID_SKGE_STAT_TX_SINGLE_COL 0xFF020129 -#define OID_SKGE_STAT_TX_MULTI_COL 0xFF02012A -#define OID_SKGE_STAT_TX_EXCESS_COL 0xFF02012B -#define OID_SKGE_STAT_TX_LATE_COL 0xFF02012C -#define OID_SKGE_STAT_TX_DEFFERAL 0xFF02012D -#define OID_SKGE_STAT_TX_EXCESS_DEF 0xFF02012E -#define OID_SKGE_STAT_TX_UNDERRUN 0xFF02012F -#define OID_SKGE_STAT_TX_CARRIER 0xFF020130 +#define OID_SKGE_STAT_TX 0xFF020120 +#define OID_SKGE_STAT_TX_OCTETS 0xFF020121 +#define OID_SKGE_STAT_TX_BROADCAST 0xFF020122 +#define OID_SKGE_STAT_TX_MULTICAST 0xFF020123 +#define OID_SKGE_STAT_TX_UNICAST 0xFF020124 +#define OID_SKGE_STAT_TX_LONGFRAMES 0xFF020125 +#define OID_SKGE_STAT_TX_BURST 0xFF020126 +#define OID_SKGE_STAT_TX_PFLOWC 0xFF020127 +#define OID_SKGE_STAT_TX_FLOWC 0xFF020128 +#define OID_SKGE_STAT_TX_SINGLE_COL 0xFF020129 +#define OID_SKGE_STAT_TX_MULTI_COL 0xFF02012A +#define OID_SKGE_STAT_TX_EXCESS_COL 0xFF02012B +#define OID_SKGE_STAT_TX_LATE_COL 0xFF02012C +#define OID_SKGE_STAT_TX_DEFFERAL 0xFF02012D +#define OID_SKGE_STAT_TX_EXCESS_DEF 0xFF02012E +#define OID_SKGE_STAT_TX_UNDERRUN 0xFF02012F +#define OID_SKGE_STAT_TX_CARRIER 0xFF020130 /* #define OID_SKGE_STAT_TX_UTIL 0xFF020131 */ -#define OID_SKGE_STAT_TX_64 0xFF020132 -#define OID_SKGE_STAT_TX_127 0xFF020133 -#define OID_SKGE_STAT_TX_255 0xFF020134 -#define OID_SKGE_STAT_TX_511 0xFF020135 -#define OID_SKGE_STAT_TX_1023 0xFF020136 -#define OID_SKGE_STAT_TX_MAX 0xFF020137 -#define OID_SKGE_STAT_TX_SYNC 0xFF020138 +#define OID_SKGE_STAT_TX_64 0xFF020132 +#define OID_SKGE_STAT_TX_127 0xFF020133 +#define OID_SKGE_STAT_TX_255 0xFF020134 +#define OID_SKGE_STAT_TX_511 0xFF020135 +#define OID_SKGE_STAT_TX_1023 0xFF020136 +#define OID_SKGE_STAT_TX_MAX 0xFF020137 +#define OID_SKGE_STAT_TX_SYNC 0xFF020138 #define OID_SKGE_STAT_TX_SYNC_OCTETS 0xFF020139 -#define OID_SKGE_STAT_RX 0xFF02013A -#define OID_SKGE_STAT_RX_OCTETS 0xFF02013B -#define OID_SKGE_STAT_RX_BROADCAST 0xFF02013C -#define OID_SKGE_STAT_RX_MULTICAST 0xFF02013D -#define OID_SKGE_STAT_RX_UNICAST 0xFF02013E -#define OID_SKGE_STAT_RX_PFLOWC 0xFF02013F -#define OID_SKGE_STAT_RX_FLOWC 0xFF020140 -#define OID_SKGE_STAT_RX_PFLOWC_ERR 0xFF020141 +#define OID_SKGE_STAT_RX 0xFF02013A +#define OID_SKGE_STAT_RX_OCTETS 0xFF02013B +#define OID_SKGE_STAT_RX_BROADCAST 0xFF02013C +#define OID_SKGE_STAT_RX_MULTICAST 0xFF02013D +#define OID_SKGE_STAT_RX_UNICAST 0xFF02013E +#define OID_SKGE_STAT_RX_PFLOWC 0xFF02013F +#define OID_SKGE_STAT_RX_FLOWC 0xFF020140 +#define OID_SKGE_STAT_RX_PFLOWC_ERR 0xFF020141 #define OID_SKGE_STAT_RX_FLOWC_UNKWN 0xFF020142 -#define OID_SKGE_STAT_RX_BURST 0xFF020143 -#define OID_SKGE_STAT_RX_MISSED 0xFF020144 -#define OID_SKGE_STAT_RX_FRAMING 0xFF020145 -#define OID_SKGE_STAT_RX_OVERFLOW 0xFF020146 -#define OID_SKGE_STAT_RX_JABBER 0xFF020147 -#define OID_SKGE_STAT_RX_CARRIER 0xFF020148 -#define OID_SKGE_STAT_RX_IR_LENGTH 0xFF020149 -#define OID_SKGE_STAT_RX_SYMBOL 0xFF02014A 
-#define OID_SKGE_STAT_RX_SHORTS 0xFF02014B -#define OID_SKGE_STAT_RX_RUNT 0xFF02014C -#define OID_SKGE_STAT_RX_CEXT 0xFF02014D -#define OID_SKGE_STAT_RX_TOO_LONG 0xFF02014E -#define OID_SKGE_STAT_RX_FCS 0xFF02014F +#define OID_SKGE_STAT_RX_BURST 0xFF020143 +#define OID_SKGE_STAT_RX_MISSED 0xFF020144 +#define OID_SKGE_STAT_RX_FRAMING 0xFF020145 +#define OID_SKGE_STAT_RX_OVERFLOW 0xFF020146 +#define OID_SKGE_STAT_RX_JABBER 0xFF020147 +#define OID_SKGE_STAT_RX_CARRIER 0xFF020148 +#define OID_SKGE_STAT_RX_IR_LENGTH 0xFF020149 +#define OID_SKGE_STAT_RX_SYMBOL 0xFF02014A +#define OID_SKGE_STAT_RX_SHORTS 0xFF02014B +#define OID_SKGE_STAT_RX_RUNT 0xFF02014C +#define OID_SKGE_STAT_RX_CEXT 0xFF02014D +#define OID_SKGE_STAT_RX_TOO_LONG 0xFF02014E +#define OID_SKGE_STAT_RX_FCS 0xFF02014F /* #define OID_SKGE_STAT_RX_UTIL 0xFF020150 */ -#define OID_SKGE_STAT_RX_64 0xFF020151 -#define OID_SKGE_STAT_RX_127 0xFF020152 -#define OID_SKGE_STAT_RX_255 0xFF020153 -#define OID_SKGE_STAT_RX_511 0xFF020154 -#define OID_SKGE_STAT_RX_1023 0xFF020155 -#define OID_SKGE_STAT_RX_MAX 0xFF020156 -#define OID_SKGE_STAT_RX_LONGFRAMES 0xFF020157 - -#define OID_SKGE_DIAG_ACTION 0xFF01011D -#define OID_SKGE_DIAG_RESULT 0xFF01011E -#define OID_SKGE_MTU 0xFF01011F -#define OID_SKGE_PHYS_CUR_ADDR 0xFF010120 -#define OID_SKGE_PHYS_FAC_ADDR 0xFF010121 -#define OID_SKGE_PMD 0xFF010122 -#define OID_SKGE_CONNECTOR 0xFF010123 -#define OID_SKGE_LINK_CAP 0xFF010124 -#define OID_SKGE_LINK_MODE 0xFF010125 -#define OID_SKGE_LINK_MODE_STATUS 0xFF010126 -#define OID_SKGE_LINK_STATUS 0xFF010127 -#define OID_SKGE_FLOWCTRL_CAP 0xFF010128 -#define OID_SKGE_FLOWCTRL_MODE 0xFF010129 -#define OID_SKGE_FLOWCTRL_STATUS 0xFF01012A -#define OID_SKGE_PHY_OPERATION_CAP 0xFF01012B -#define OID_SKGE_PHY_OPERATION_MODE 0xFF01012C -#define OID_SKGE_PHY_OPERATION_STATUS 0xFF01012D -#define OID_SKGE_MULTICAST_LIST 0xFF01012E -#define OID_SKGE_CURRENT_PACKET_FILTER 0xFF01012F - -#define OID_SKGE_TRAP 0xFF010130 -#define OID_SKGE_TRAP_NUMBER 0xFF010131 - -#define OID_SKGE_RLMT_MODE 0xFF010140 -#define OID_SKGE_RLMT_PORT_NUMBER 0xFF010141 -#define OID_SKGE_RLMT_PORT_ACTIVE 0xFF010142 -#define OID_SKGE_RLMT_PORT_PREFERRED 0xFF010143 -#define OID_SKGE_INTERMEDIATE_SUPPORT 0xFF010160 -#define OID_SKGE_RLMT_CHANGE_CTS 0xFF020160 -#define OID_SKGE_RLMT_CHANGE_TIME 0xFF020161 -#define OID_SKGE_RLMT_CHANGE_ESTIM 0xFF020162 -#define OID_SKGE_RLMT_CHANGE_THRES 0xFF020163 - -#define OID_SKGE_RLMT_PORT_INDEX 0xFF020164 -#define OID_SKGE_RLMT_STATUS 0xFF020165 -#define OID_SKGE_RLMT_TX_HELLO_CTS 0xFF020166 -#define OID_SKGE_RLMT_RX_HELLO_CTS 0xFF020167 -#define OID_SKGE_RLMT_TX_SP_REQ_CTS 0xFF020168 -#define OID_SKGE_RLMT_RX_SP_CTS 0xFF020169 +#define OID_SKGE_STAT_RX_64 0xFF020151 +#define OID_SKGE_STAT_RX_127 0xFF020152 +#define OID_SKGE_STAT_RX_255 0xFF020153 +#define OID_SKGE_STAT_RX_511 0xFF020154 +#define OID_SKGE_STAT_RX_1023 0xFF020155 +#define OID_SKGE_STAT_RX_MAX 0xFF020156 +#define OID_SKGE_STAT_RX_LONGFRAMES 0xFF020157 + +#define OID_SKGE_RLMT_CHANGE_CTS 0xFF020160 +#define OID_SKGE_RLMT_CHANGE_TIME 0xFF020161 +#define OID_SKGE_RLMT_CHANGE_ESTIM 0xFF020162 +#define OID_SKGE_RLMT_CHANGE_THRES 0xFF020163 + +#define OID_SKGE_RLMT_PORT_INDEX 0xFF020164 +#define OID_SKGE_RLMT_STATUS 0xFF020165 +#define OID_SKGE_RLMT_TX_HELLO_CTS 0xFF020166 +#define OID_SKGE_RLMT_RX_HELLO_CTS 0xFF020167 +#define OID_SKGE_RLMT_TX_SP_REQ_CTS 0xFF020168 +#define OID_SKGE_RLMT_RX_SP_CTS 0xFF020169 #define OID_SKGE_RLMT_MONITOR_NUMBER 0xFF010150 -#define OID_SKGE_RLMT_MONITOR_INDEX 
0xFF010151 -#define OID_SKGE_RLMT_MONITOR_ADDR 0xFF010152 -#define OID_SKGE_RLMT_MONITOR_ERRS 0xFF010153 +#define OID_SKGE_RLMT_MONITOR_INDEX 0xFF010151 +#define OID_SKGE_RLMT_MONITOR_ADDR 0xFF010152 +#define OID_SKGE_RLMT_MONITOR_ERRS 0xFF010153 #define OID_SKGE_RLMT_MONITOR_TIMESTAMP 0xFF010154 -#define OID_SKGE_RLMT_MONITOR_ADMIN 0xFF010155 +#define OID_SKGE_RLMT_MONITOR_ADMIN 0xFF010155 -#define OID_SKGE_TX_SW_QUEUE_LEN 0xFF020170 -#define OID_SKGE_TX_SW_QUEUE_MAX 0xFF020171 -#define OID_SKGE_TX_RETRY 0xFF020172 -#define OID_SKGE_RX_INTR_CTS 0xFF020173 -#define OID_SKGE_TX_INTR_CTS 0xFF020174 -#define OID_SKGE_RX_NO_BUF_CTS 0xFF020175 -#define OID_SKGE_TX_NO_BUF_CTS 0xFF020176 -#define OID_SKGE_TX_USED_DESCR_NO 0xFF020177 -#define OID_SKGE_RX_DELIVERED_CTS 0xFF020178 +#define OID_SKGE_TX_SW_QUEUE_LEN 0xFF020170 +#define OID_SKGE_TX_SW_QUEUE_MAX 0xFF020171 +#define OID_SKGE_TX_RETRY 0xFF020172 +#define OID_SKGE_RX_INTR_CTS 0xFF020173 +#define OID_SKGE_TX_INTR_CTS 0xFF020174 +#define OID_SKGE_RX_NO_BUF_CTS 0xFF020175 +#define OID_SKGE_TX_NO_BUF_CTS 0xFF020176 +#define OID_SKGE_TX_USED_DESCR_NO 0xFF020177 +#define OID_SKGE_RX_DELIVERED_CTS 0xFF020178 #define OID_SKGE_RX_OCTETS_DELIV_CTS 0xFF020179 -#define OID_SKGE_RX_HW_ERROR_CTS 0xFF02017A -#define OID_SKGE_TX_HW_ERROR_CTS 0xFF02017B -#define OID_SKGE_IN_ERRORS_CTS 0xFF02017C -#define OID_SKGE_OUT_ERROR_CTS 0xFF02017D -#define OID_SKGE_ERR_RECOVERY_CTS 0xFF02017E -#define OID_SKGE_SYSUPTIME 0xFF02017F - -#define OID_SKGE_ALL_DATA 0xFF020190 - - -#define OID_SKGE_TRAP_SEN_WAR_LOW 500 -#define OID_SKGE_TRAP_SEN_WAR_UPP 501 -#define OID_SKGE_TRAP_SEN_ERR_LOW 502 -#define OID_SKGE_TRAP_SEN_ERR_UPP 503 +#define OID_SKGE_RX_HW_ERROR_CTS 0xFF02017A +#define OID_SKGE_TX_HW_ERROR_CTS 0xFF02017B +#define OID_SKGE_IN_ERRORS_CTS 0xFF02017C +#define OID_SKGE_OUT_ERROR_CTS 0xFF02017D +#define OID_SKGE_ERR_RECOVERY_CTS 0xFF02017E +#define OID_SKGE_SYSUPTIME 0xFF02017F + +#define OID_SKGE_ALL_DATA 0xFF020190 + +/* Defines for VCT. */ +#define OID_SKGE_VCT_GET 0xFF020200 +#define OID_SKGE_VCT_SET 0xFF020201 +#define OID_SKGE_VCT_STATUS 0xFF020202 + + +/* VCT struct to store a backup copy of VCT data after a port reset. */ +typedef struct s_PnmiVct { + SK_U8 VctStatus; + SK_U8 PCableLen; + SK_U32 PMdiPairLen[4]; + SK_U8 PMdiPairSts[4]; +} SK_PNMI_VCT; + + +/* VCT status values (to be given to CPA via OID_SKGE_VCT_STATUS). */ +#define SK_PNMI_VCT_NONE 0 +#define SK_PNMI_VCT_OLD_VCT_DATA 1 +#define SK_PNMI_VCT_NEW_VCT_DATA 2 +#define SK_PNMI_VCT_OLD_DSP_DATA 4 +#define SK_PNMI_VCT_NEW_DSP_DATA 8 +#define SK_PNMI_VCT_RUNNING 16 + + +/* VCT cable test status. 
*/ +#define SK_PNMI_VCT_NORMAL_CABLE 0 +#define SK_PNMI_VCT_SHORT_CABLE 1 +#define SK_PNMI_VCT_OPEN_CABLE 2 +#define SK_PNMI_VCT_TEST_FAIL 3 +#define SK_PNMI_VCT_IMPEDANCE_MISMATCH 4 + +#define OID_SKGE_TRAP_SEN_WAR_LOW 500 +#define OID_SKGE_TRAP_SEN_WAR_UPP 501 +#define OID_SKGE_TRAP_SEN_ERR_LOW 502 +#define OID_SKGE_TRAP_SEN_ERR_UPP 503 #define OID_SKGE_TRAP_RLMT_CHANGE_THRES 520 #define OID_SKGE_TRAP_RLMT_CHANGE_PORT 521 #define OID_SKGE_TRAP_RLMT_PORT_DOWN 522 -#define OID_SKGE_TRAP_RLMT_PORT_UP 523 +#define OID_SKGE_TRAP_RLMT_PORT_UP 523 #define OID_SKGE_TRAP_RLMT_SEGMENTATION 524 @@ -539,7 +623,7 @@ #define SK_PNMI_ERR012 (SK_ERRBASE_PNMI + 12) #define SK_PNMI_ERR012MSG "SensorStat: Unknown OID" #define SK_PNMI_ERR013 (SK_ERRBASE_PNMI + 13) -#define SK_PNMI_ERR013MSG "SensorStat: Unknown OID should be errored before" +#define SK_PNMI_ERR013MSG "" #define SK_PNMI_ERR014 (SK_ERRBASE_PNMI + 14) #define SK_PNMI_ERR014MSG "Vpd: Cannot read VPD keys" #define SK_PNMI_ERR015 (SK_ERRBASE_PNMI + 15) @@ -583,7 +667,7 @@ #define SK_PNMI_ERR035 (SK_ERRBASE_PNMI + 35) #define SK_PNMI_ERR035MSG "Rlmt: Unknown OID" #define SK_PNMI_ERR036 (SK_ERRBASE_PNMI + 36) -#define SK_PNMI_ERR036MSG "Rlmt: Unknown OID should be errored before" +#define SK_PNMI_ERR036MSG "" #define SK_PNMI_ERR037 (SK_ERRBASE_PNMI + 37) #define SK_PNMI_ERR037MSG "Rlmt: SK_RLMT_MODE_CHANGE event return not 0" #define SK_PNMI_ERR038 (SK_ERRBASE_PNMI + 38) @@ -591,17 +675,17 @@ #define SK_PNMI_ERR039 (SK_ERRBASE_PNMI + 39) #define SK_PNMI_ERR039MSG "RlmtStat: Unknown OID" #define SK_PNMI_ERR040 (SK_ERRBASE_PNMI + 40) -#define SK_PNMI_ERR040MSG "RlmtStat: Unknown OID should be errored before" +#define SK_PNMI_ERR040MSG "PowerManagement: Unknown OID" #define SK_PNMI_ERR041 (SK_ERRBASE_PNMI + 41) #define SK_PNMI_ERR041MSG "MacPrivateConf: Unknown OID" #define SK_PNMI_ERR042 (SK_ERRBASE_PNMI + 42) -#define SK_PNMI_ERR042MSG "MacPrivateConf: Unknown OID should be errored before" +#define SK_PNMI_ERR042MSG "MacPrivateConf: SK_HWEV_SET_ROLE returned not 0" #define SK_PNMI_ERR043 (SK_ERRBASE_PNMI + 43) #define SK_PNMI_ERR043MSG "MacPrivateConf: SK_HWEV_SET_LMODE returned not 0" #define SK_PNMI_ERR044 (SK_ERRBASE_PNMI + 44) #define SK_PNMI_ERR044MSG "MacPrivateConf: SK_HWEV_SET_FLOWMODE returned not 0" #define SK_PNMI_ERR045 (SK_ERRBASE_PNMI + 45) -#define SK_PNMI_ERR045MSG "MacPrivateConf: Unknown OID in set action" +#define SK_PNMI_ERR045MSG "MacPrivateConf: SK_HWEV_SET_SPEED returned not 0" #define SK_PNMI_ERR046 (SK_ERRBASE_PNMI + 46) #define SK_PNMI_ERR046MSG "Monitor: Unknown OID" #define SK_PNMI_ERR047 (SK_ERRBASE_PNMI + 47) @@ -609,13 +693,13 @@ #define SK_PNMI_ERR048 (SK_ERRBASE_PNMI + 48) #define SK_PNMI_ERR048MSG "RlmtUpdate: Event function returns not 0" #define SK_PNMI_ERR049 (SK_ERRBASE_PNMI + 49) -#define SK_PNMI_ERR049MSG "" +#define SK_PNMI_ERR049MSG "SkPnmiInit: Invalid size of 'CounterOffset' struct!!" #define SK_PNMI_ERR050 (SK_ERRBASE_PNMI + 50) -#define SK_PNMI_ERR050MSG "MacUpdate: Cannot update statistic counter" +#define SK_PNMI_ERR050MSG "SkPnmiInit: Invalid size of 'StatAddr' table!!" 
#define SK_PNMI_ERR051 (SK_ERRBASE_PNMI + 51) #define SK_PNMI_ERR051MSG "SkPnmiEvent: Port switch suspicious" #define SK_PNMI_ERR052 (SK_ERRBASE_PNMI + 52) -#define SK_PNMI_ERR052MSG "MacPrivateConf: SK_HWEV_SET_ROLE returned not 0" +#define SK_PNMI_ERR052MSG "" /* * Management counter macros called by the driver @@ -659,7 +743,21 @@ #define SK_PNMI_CNT_RX_LONGFRAMES(pAC,p) \ { \ if ((p) < SK_MAX_MACS) { \ - ((pAC)->Pnmi.Port[p].StatRxLongFrameCts)++; \ + ((pAC)->Pnmi.Port[p].StatRxLongFrameCts++); \ + } \ + } + +#define SK_PNMI_CNT_RX_FRAMETOOLONG(pAC,p) \ + { \ + if ((p) < SK_MAX_MACS) { \ + ((pAC)->Pnmi.Port[p].StatRxFrameTooLongCts++); \ + } \ + } + +#define SK_PNMI_CNT_RX_PMACC_ERR(pAC,p) \ + { \ + if ((p) < SK_MAX_MACS) { \ + ((pAC)->Pnmi.Port[p].StatRxPMaccErr++); \ } \ } @@ -685,12 +783,12 @@ #define SK_PNMI_MULTICAST_LISTLEN 64 #define SK_PNMI_SENSOR_ENTRIES (SK_MAX_SENSORS) #define SK_PNMI_CHECKSUM_ENTRIES 3 -#define SK_PNMI_MAC_ENTRIES (SK_MAX_MACS + 1) +#define SK_PNMI_MAC_ENTRIES (SK_MAX_MACS + 1) #define SK_PNMI_MONITOR_ENTRIES 20 #define SK_PNMI_TRAP_ENTRIES 10 -#define SK_PNMI_TRAPLEN 128 -#define SK_PNMI_STRINGLEN1 80 -#define SK_PNMI_STRINGLEN2 25 +#define SK_PNMI_TRAPLEN 128 +#define SK_PNMI_STRINGLEN1 80 +#define SK_PNMI_STRINGLEN2 25 #define SK_PNMI_TRAP_QUEUE_LEN 512 typedef struct s_PnmiVpd { @@ -798,6 +896,9 @@ SK_U8 ConfPhyOperationCapability; SK_U8 ConfPhyOperationMode; SK_U8 ConfPhyOperationStatus; + SK_U8 ConfSpeedCapability; + SK_U8 ConfSpeedMode; + SK_U8 ConfSpeedStatus; } SK_PNMI_CONF; typedef struct s_PnmiRlmt { @@ -843,11 +944,11 @@ SK_U8 BusSpeed; SK_U8 BusWidth; SK_U8 SensorNumber; - SK_PNMI_SENSOR Sensor[SK_PNMI_SENSOR_ENTRIES]; + SK_PNMI_SENSOR Sensor[SK_PNMI_SENSOR_ENTRIES]; SK_U8 ChecksumNumber; SK_PNMI_CHECKSUM Checksum[SK_PNMI_CHECKSUM_ENTRIES]; - SK_PNMI_STAT Stat[SK_PNMI_MAC_ENTRIES]; - SK_PNMI_CONF Conf[SK_PNMI_MAC_ENTRIES]; + SK_PNMI_STAT Stat[SK_PNMI_MAC_ENTRIES]; + SK_PNMI_CONF Conf[SK_PNMI_MAC_ENTRIES]; SK_U8 RlmtMode; SK_U32 RlmtPortNumber; SK_U8 RlmtPortActive; @@ -856,7 +957,7 @@ SK_U64 RlmtChangeTime; SK_U64 RlmtChangeEstimate; SK_U64 RlmtChangeThreshold; - SK_PNMI_RLMT Rlmt[SK_MAX_MACS]; + SK_PNMI_RLMT Rlmt[SK_MAX_MACS]; SK_U32 RlmtMonitorNumber; SK_PNMI_RLMT_MONITOR RlmtMonitor[SK_PNMI_MONITOR_ENTRIES]; SK_U32 TrapNumber; @@ -882,25 +983,27 @@ #define SK_PNMI_STRUCT_SIZE (sizeof(SK_PNMI_STRUCT_DATA)) #define SK_PNMI_MIN_STRUCT_SIZE ((unsigned int)(SK_UPTR)\ &(((SK_PNMI_STRUCT_DATA *)0)->VpdFreeBytes)) - /* - * ReturnStatus field - * must be located - * before VpdFreeBytes - */ + /* + * ReturnStatus field + * must be located + * before VpdFreeBytes + */ /* * Various definitions */ #define SK_PNMI_MAX_PROTOS 3 -#define SK_PNMI_SCNT_NOT 64 -#define SK_PNMI_CNT_NO 67 +#define SK_PNMI_CNT_NO 66 /* Must have the value of the enum + * SK_PNMI_MAX_IDX. 
Define SK_PNMI_CHECK + * for check while init phase 1 + */ /* * Estimate data structure */ typedef struct s_PnmiEstimate { - unsigned int EstValueIndex; + unsigned int EstValueIndex; SK_U64 EstValue[7]; SK_U64 Estimate; SK_TIMER EstTimer; @@ -908,15 +1011,22 @@ /* - * PNMI specific adatper context structure + * VCT timer data structure + */ +typedef struct s_VctTimer { + SK_TIMER VctTimer; +} SK_PNMI_VCT_TIMER; + + +/* + * PNMI specific adapter context structure */ typedef struct s_PnmiPort { - SK_U32 CounterHigh[SK_PNMI_SCNT_NOT]; - SK_U64 CounterOffset[SK_PNMI_CNT_NO]; SK_U64 StatSyncCts; SK_U64 StatSyncOctetsCts; SK_U64 StatRxLongFrameCts; - SK_BOOL ActiveFlag; + SK_U64 StatRxFrameTooLongCts; + SK_U64 StatRxPMaccErr; SK_U64 TxSwQueueLen; SK_U64 TxSwQueueMax; SK_U64 TxRetryCts; @@ -932,41 +1042,53 @@ SK_U64 InErrorsCts; SK_U64 OutErrorsCts; SK_U64 ErrRecoveryCts; + SK_U64 RxShortZeroMark; + SK_U64 CounterOffset[SK_PNMI_CNT_NO]; + SK_U32 CounterHigh[SK_PNMI_CNT_NO]; + SK_BOOL ActiveFlag; + SK_U8 Align[3]; } SK_PNMI_PORT; typedef struct s_PnmiData { - SK_PNMI_PORT Port[SK_MAX_MACS]; + SK_PNMI_PORT Port [SK_MAX_MACS]; + SK_PNMI_PORT BufPort [SK_MAX_MACS]; /* 2002-09-13 pweber */ SK_U64 VirtualCounterOffset[SK_PNMI_CNT_NO]; SK_U32 TestResult; char HwVersion[10]; + SK_U16 Align01; char *pDriverDescription; char *pDriverVersion; - char TrapBuf[SK_PNMI_TRAP_QUEUE_LEN]; - unsigned int TrapBufFree; - unsigned int TrapQueueBeg; - unsigned int TrapQueueEnd; - unsigned int TrapBufPad; - unsigned int TrapUnique; - - int MacUpdatedFlag; - int RlmtUpdatedFlag; - int SirqUpdatedFlag; + int MacUpdatedFlag; + int RlmtUpdatedFlag; + int SirqUpdatedFlag; SK_U64 RlmtChangeCts; SK_U64 RlmtChangeTime; SK_PNMI_ESTIMATE RlmtChangeEstimate; SK_U64 RlmtChangeThreshold; + SK_U64 StartUpTime; SK_U32 DeviceType; char PciBusSpeed; char PciBusWidth; + char Chipset; char PMD; char Connector; - SK_U64 StartUpTime; SK_BOOL DualNetActiveFlag; + SK_U16 Align02; + + char TrapBuf[SK_PNMI_TRAP_QUEUE_LEN]; + unsigned int TrapBufFree; + unsigned int TrapQueueBeg; + unsigned int TrapQueueEnd; + unsigned int TrapBufPad; + unsigned int TrapUnique; + SK_U8 VctStatus[SK_MAX_MACS]; + SK_PNMI_VCT VctBackup[SK_MAX_MACS]; + SK_PNMI_VCT_TIMER VctTimeout[SK_MAX_MACS]; } SK_PNMI; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skgesirq.h linux.21pre4-ac2/drivers/net/sk98lin/h/skgesirq.h --- linux.21pre4/drivers/net/sk98lin/h/skgesirq.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skgesirq.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skgesirq.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.22 $ - * Date: $Date: 2000/11/09 11:30:10 $ + * Version: $Revision: 1.26 $ + * Date: $Date: 2002/10/14 09:52:36 $ * Purpose: SK specific Gigabit Ethernet special IRQ functions * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2000 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,6 +25,21 @@ * * History: * $Log: skgesirq.h,v $ + * Revision 1.26 2002/10/14 09:52:36 rschmidt + * Added SKERR_SIRQ_E023 and SKERR_SIRQ_E023 for GPHY (Yukon) + * Editorial changes + * + * Revision 1.25 2002/07/15 18:15:52 rwahl + * Editorial changes. 
+ * + * Revision 1.24 2002/07/15 15:39:21 rschmidt + * Corrected define for SKERR_SIRQ_E022 + * Editorial changes + * + * Revision 1.23 2002/04/25 11:09:45 rschmidt + * Removed declarations for SkXmInitPhy(), SkXmRxTxEnable() + * Editorial changes + * * Revision 1.22 2000/11/09 11:30:10 rassmann * WA: Waiting after releasing reset until BCom chip is accessible. * @@ -66,7 +81,7 @@ * defined in skgeinit.h now. * * Revision 1.9 1998/10/14 14:00:39 gklug - * add: eroor logs for init phys + * add: error logs for init phys * * Revision 1.8 1998/10/14 05:44:05 gklug * add: E020 @@ -103,73 +118,76 @@ /* * Define the Event the special IRQ/INI module can handle */ -#define SK_HWEV_WATIM 1 /* Timeout for WA errata #2 XMAC */ -#define SK_HWEV_PORT_START 2 /* Port Start Event by RLMT */ -#define SK_HWEV_PORT_STOP 3 /* Port Stop Event by RLMT */ -#define SK_HWEV_CLEAR_STAT 4 /* Clear Statistics by PNMI */ -#define SK_HWEV_UPDATE_STAT 5 /* Update Statistics by PNMI */ -#define SK_HWEV_SET_LMODE 6 /* Set Link Mode by PNMI */ +#define SK_HWEV_WATIM 1 /* Timeout for WA errata #2 XMAC */ +#define SK_HWEV_PORT_START 2 /* Port Start Event by RLMT */ +#define SK_HWEV_PORT_STOP 3 /* Port Stop Event by RLMT */ +#define SK_HWEV_CLEAR_STAT 4 /* Clear Statistics by PNMI */ +#define SK_HWEV_UPDATE_STAT 5 /* Update Statistics by PNMI */ +#define SK_HWEV_SET_LMODE 6 /* Set Link Mode by PNMI */ #define SK_HWEV_SET_FLOWMODE 7 /* Set Flow Control Mode by PNMI */ -#define SK_HWEV_SET_ROLE 8 /* Set Master/Slave (Role) by PNMI */ -#define SK_HWEV_HALFDUP_CHK 9 /* Set Master/Slave (Role) by PNMI */ +#define SK_HWEV_SET_ROLE 8 /* Set Master/Slave (Role) by PNMI */ +#define SK_HWEV_SET_SPEED 9 /* Set Link Speed by PNMI */ +#define SK_HWEV_HALFDUP_CHK 10 /* Half Duplex Hangup Workaround */ -#define SK_WA_ACT_TIME (5000000L) /* 5 sec */ -#define SK_WA_INA_TIME (100000L) /* 100 msec */ +#define SK_WA_ACT_TIME (5000000L) /* 5 sec */ +#define SK_WA_INA_TIME (100000L) /* 100 msec */ -#define SK_HALFDUP_CHK_TIME (10000L) /* 10 msec */ +#define SK_HALFDUP_CHK_TIME (10000L) /* 10 msec */ /* * Define the error numbers and messages */ -#define SKERR_SIRQ_E001 (SK_ERRBASE_SIRQ+0) -#define SKERR_SIRQ_E001MSG "Unknown event" -#define SKERR_SIRQ_E002 (SKERR_SIRQ_E001+1) -#define SKERR_SIRQ_E002MSG "Packet timeout RX1" -#define SKERR_SIRQ_E003 (SKERR_SIRQ_E002+1) -#define SKERR_SIRQ_E003MSG "Packet timeout RX2" -#define SKERR_SIRQ_E004 (SKERR_SIRQ_E003+1) -#define SKERR_SIRQ_E004MSG "XMAC 1 not correctly initialized" -#define SKERR_SIRQ_E005 (SKERR_SIRQ_E004+1) -#define SKERR_SIRQ_E005MSG "XMAC 2 not correctly initialized" -#define SKERR_SIRQ_E006 (SKERR_SIRQ_E005+1) -#define SKERR_SIRQ_E006MSG "CHECK failure R1" -#define SKERR_SIRQ_E007 (SKERR_SIRQ_E006+1) -#define SKERR_SIRQ_E007MSG "CHECK failure R2" -#define SKERR_SIRQ_E008 (SKERR_SIRQ_E007+1) -#define SKERR_SIRQ_E008MSG "CHECK failure XS1" -#define SKERR_SIRQ_E009 (SKERR_SIRQ_E008+1) -#define SKERR_SIRQ_E009MSG "CHECK failure XA1" -#define SKERR_SIRQ_E010 (SKERR_SIRQ_E009+1) -#define SKERR_SIRQ_E010MSG "CHECK failure XS2" -#define SKERR_SIRQ_E011 (SKERR_SIRQ_E010+1) -#define SKERR_SIRQ_E011MSG "CHECK failure XA2" -#define SKERR_SIRQ_E012 (SKERR_SIRQ_E011+1) -#define SKERR_SIRQ_E012MSG "unexpected IRQ Master error" -#define SKERR_SIRQ_E013 (SKERR_SIRQ_E012+1) -#define SKERR_SIRQ_E013MSG "unexpected IRQ Status error" -#define SKERR_SIRQ_E014 (SKERR_SIRQ_E013+1) -#define SKERR_SIRQ_E014MSG "Parity error on RAM (read)" -#define SKERR_SIRQ_E015 (SKERR_SIRQ_E014+1) -#define SKERR_SIRQ_E015MSG 
"Parity error on RAM (write)" -#define SKERR_SIRQ_E016 (SKERR_SIRQ_E015+1) -#define SKERR_SIRQ_E016MSG "Parity error MAC 1" -#define SKERR_SIRQ_E017 (SKERR_SIRQ_E016+1) -#define SKERR_SIRQ_E017MSG "Parity error MAC 2" -#define SKERR_SIRQ_E018 (SKERR_SIRQ_E017+1) -#define SKERR_SIRQ_E018MSG "Parity error RX 1" -#define SKERR_SIRQ_E019 (SKERR_SIRQ_E018+1) -#define SKERR_SIRQ_E019MSG "Parity error RX 2" -#define SKERR_SIRQ_E020 (SKERR_SIRQ_E019+1) -#define SKERR_SIRQ_E020MSG "XMAC transmit FIFO underrun" -#define SKERR_SIRQ_E021 (SKERR_SIRQ_E020+1) -#define SKERR_SIRQ_E021MSG "Spurious I2C interrupt" -#define SKERR_SIRQ_E022 (SKERR_SIRQ_E020+1) -#define SKERR_SIRQ_E022MSG "Cable pair swap error" +#define SKERR_SIRQ_E001 (SK_ERRBASE_SIRQ+0) +#define SKERR_SIRQ_E001MSG "Unknown event" +#define SKERR_SIRQ_E002 (SKERR_SIRQ_E001+1) +#define SKERR_SIRQ_E002MSG "Packet timeout RX1" +#define SKERR_SIRQ_E003 (SKERR_SIRQ_E002+1) +#define SKERR_SIRQ_E003MSG "Packet timeout RX2" +#define SKERR_SIRQ_E004 (SKERR_SIRQ_E003+1) +#define SKERR_SIRQ_E004MSG "MAC 1 not correctly initialized" +#define SKERR_SIRQ_E005 (SKERR_SIRQ_E004+1) +#define SKERR_SIRQ_E005MSG "MAC 2 not correctly initialized" +#define SKERR_SIRQ_E006 (SKERR_SIRQ_E005+1) +#define SKERR_SIRQ_E006MSG "CHECK failure R1" +#define SKERR_SIRQ_E007 (SKERR_SIRQ_E006+1) +#define SKERR_SIRQ_E007MSG "CHECK failure R2" +#define SKERR_SIRQ_E008 (SKERR_SIRQ_E007+1) +#define SKERR_SIRQ_E008MSG "CHECK failure XS1" +#define SKERR_SIRQ_E009 (SKERR_SIRQ_E008+1) +#define SKERR_SIRQ_E009MSG "CHECK failure XA1" +#define SKERR_SIRQ_E010 (SKERR_SIRQ_E009+1) +#define SKERR_SIRQ_E010MSG "CHECK failure XS2" +#define SKERR_SIRQ_E011 (SKERR_SIRQ_E010+1) +#define SKERR_SIRQ_E011MSG "CHECK failure XA2" +#define SKERR_SIRQ_E012 (SKERR_SIRQ_E011+1) +#define SKERR_SIRQ_E012MSG "unexpected IRQ Master error" +#define SKERR_SIRQ_E013 (SKERR_SIRQ_E012+1) +#define SKERR_SIRQ_E013MSG "unexpected IRQ Status error" +#define SKERR_SIRQ_E014 (SKERR_SIRQ_E013+1) +#define SKERR_SIRQ_E014MSG "Parity error on RAM (read)" +#define SKERR_SIRQ_E015 (SKERR_SIRQ_E014+1) +#define SKERR_SIRQ_E015MSG "Parity error on RAM (write)" +#define SKERR_SIRQ_E016 (SKERR_SIRQ_E015+1) +#define SKERR_SIRQ_E016MSG "Parity error MAC 1" +#define SKERR_SIRQ_E017 (SKERR_SIRQ_E016+1) +#define SKERR_SIRQ_E017MSG "Parity error MAC 2" +#define SKERR_SIRQ_E018 (SKERR_SIRQ_E017+1) +#define SKERR_SIRQ_E018MSG "Parity error RX 1" +#define SKERR_SIRQ_E019 (SKERR_SIRQ_E018+1) +#define SKERR_SIRQ_E019MSG "Parity error RX 2" +#define SKERR_SIRQ_E020 (SKERR_SIRQ_E019+1) +#define SKERR_SIRQ_E020MSG "MAC transmit FIFO underrun" +#define SKERR_SIRQ_E021 (SKERR_SIRQ_E020+1) +#define SKERR_SIRQ_E021MSG "Spurious TWSI interrupt" +#define SKERR_SIRQ_E022 (SKERR_SIRQ_E021+1) +#define SKERR_SIRQ_E022MSG "Cable pair swap error" +#define SKERR_SIRQ_E023 (SKERR_SIRQ_E022+1) +#define SKERR_SIRQ_E023MSG "Auto-negotiation error" +#define SKERR_SIRQ_E024 (SKERR_SIRQ_E023+1) +#define SKERR_SIRQ_E024MSG "FIFO overflow error" extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus); extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para); -extern void SkXmInitPhy( SK_AC *pAC, SK_IOC IoC, int Port, SK_BOOL DoLoop); -extern int SkXmRxTxEnable(SK_AC *pAC, SK_IOC IoC, int Port); extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port); extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/ski2c.h 
linux.21pre4-ac2/drivers/net/sk98lin/h/ski2c.h --- linux.21pre4/drivers/net/sk98lin/h/ski2c.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/ski2c.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: ski2c.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.30 $ - * Date: $Date: 2001/04/05 11:38:09 $ + * Version: $Revision: 1.33 $ + * Date: $Date: 2002/10/14 16:40:50 $ * Purpose: Defines to access Voltage and Temperature Sensor * (taken from Monalisa (taken from Concentrator)) * @@ -28,6 +28,15 @@ * History: * * $Log: ski2c.h,v $ + * Revision 1.33 2002/10/14 16:40:50 rschmidt + * Editorial changes (TWSI) + * + * Revision 1.32 2002/08/13 08:55:07 rschmidt + * Editorial changes + * + * Revision 1.31 2002/08/06 09:44:22 jschmalz + * Extensions and changes for Yukon + * * Revision 1.30 2001/04/05 11:38:09 rassmann * Set SenState to idle in SkI2cWaitIrq(). * Changed error message in SkI2cWaitIrq(). @@ -142,127 +151,131 @@ #include "h/skgei2c.h" /* - * Define the I2c events. + * Define the I2C events. */ #define SK_I2CEV_IRQ 1 /* IRQ happened Event */ #define SK_I2CEV_TIM 2 /* Timeout event */ -#define SK_I2CEV_CLEAR 3 /* Clear Mib Values */ +#define SK_I2CEV_CLEAR 3 /* Clear MIB Values */ /* * Define READ and WRITE Constants. */ -#define I2C_READ 0 -#define I2C_WRITE 1 +#define I2C_READ 0 +#define I2C_WRITE 1 #define I2C_BURST 1 #define I2C_SIGLE 0 -#define SKERR_I2C_E001 (SK_ERRBASE_I2C+0) -#define SKERR_I2C_E001MSG "Sensor index unknown" -#define SKERR_I2C_E002 (SKERR_I2C_E001+1) -#define SKERR_I2C_E002MSG "I2C: transfer does not complete.\n" -#define SKERR_I2C_E003 (SKERR_I2C_E002+1) -#define SKERR_I2C_E003MSG "lm80: NAK on device send.\n" -#define SKERR_I2C_E004 (SKERR_I2C_E003+1) -#define SKERR_I2C_E004MSG "lm80: NAK on register send.\n" -#define SKERR_I2C_E005 (SKERR_I2C_E004+1) -#define SKERR_I2C_E005MSG "lm80: NAK on device (2) send.\n" -#define SKERR_I2C_E006 (SKERR_I2C_E005+1) -#define SKERR_I2C_E006MSG "Unknown event" -#define SKERR_I2C_E007 (SKERR_I2C_E006+1) -#define SKERR_I2C_E007MSG "LM80 read out of state" -#define SKERR_I2C_E008 (SKERR_I2C_E007+1) -#define SKERR_I2C_E008MSG "unexpected sensor read completed" -#define SKERR_I2C_E009 (SKERR_I2C_E008+1) -#define SKERR_I2C_E009MSG "WARNING: temperature sensor out of range" -#define SKERR_I2C_E010 (SKERR_I2C_E009+1) -#define SKERR_I2C_E010MSG "WARNING: voltage sensor out of range" -#define SKERR_I2C_E011 (SKERR_I2C_E010+1) -#define SKERR_I2C_E011MSG "ERROR: temperature sensor out of range" -#define SKERR_I2C_E012 (SKERR_I2C_E011+1) -#define SKERR_I2C_E012MSG "ERROR: voltage sensor out of range" -#define SKERR_I2C_E013 (SKERR_I2C_E012+1) -#define SKERR_I2C_E013MSG "ERROR: couldn't init sensor" -#define SKERR_I2C_E014 (SKERR_I2C_E013+1) -#define SKERR_I2C_E014MSG "WARNING: fan sensor out of range" -#define SKERR_I2C_E015 (SKERR_I2C_E014+1) -#define SKERR_I2C_E015MSG "ERROR: fan sensor out of range" -#define SKERR_I2C_E016 (SKERR_I2C_E015+1) -#define SKERR_I2C_E016MSG "I2C: active transfer does not complete.\n" +#define SKERR_I2C_E001 (SK_ERRBASE_I2C+0) +#define SKERR_I2C_E001MSG "Sensor index unknown" +#define SKERR_I2C_E002 (SKERR_I2C_E001+1) +#define SKERR_I2C_E002MSG "TWSI: transfer does not complete" +#define SKERR_I2C_E003 (SKERR_I2C_E002+1) +#define SKERR_I2C_E003MSG "LM80: NAK on device send" +#define SKERR_I2C_E004 (SKERR_I2C_E003+1) +#define SKERR_I2C_E004MSG "LM80: NAK on register send" +#define SKERR_I2C_E005 (SKERR_I2C_E004+1) +#define SKERR_I2C_E005MSG 
"LM80: NAK on device (2) send" +#define SKERR_I2C_E006 (SKERR_I2C_E005+1) +#define SKERR_I2C_E006MSG "Unknown event" +#define SKERR_I2C_E007 (SKERR_I2C_E006+1) +#define SKERR_I2C_E007MSG "LM80 read out of state" +#define SKERR_I2C_E008 (SKERR_I2C_E007+1) +#define SKERR_I2C_E008MSG "Unexpected sensor read completed" +#define SKERR_I2C_E009 (SKERR_I2C_E008+1) +#define SKERR_I2C_E009MSG "WARNING: temperature sensor out of range" +#define SKERR_I2C_E010 (SKERR_I2C_E009+1) +#define SKERR_I2C_E010MSG "WARNING: voltage sensor out of range" +#define SKERR_I2C_E011 (SKERR_I2C_E010+1) +#define SKERR_I2C_E011MSG "ERROR: temperature sensor out of range" +#define SKERR_I2C_E012 (SKERR_I2C_E011+1) +#define SKERR_I2C_E012MSG "ERROR: voltage sensor out of range" +#define SKERR_I2C_E013 (SKERR_I2C_E012+1) +#define SKERR_I2C_E013MSG "ERROR: couldn't init sensor" +#define SKERR_I2C_E014 (SKERR_I2C_E013+1) +#define SKERR_I2C_E014MSG "WARNING: fan sensor out of range" +#define SKERR_I2C_E015 (SKERR_I2C_E014+1) +#define SKERR_I2C_E015MSG "ERROR: fan sensor out of range" +#define SKERR_I2C_E016 (SKERR_I2C_E015+1) +#define SKERR_I2C_E016MSG "TWSI: active transfer does not complete" /* * Define Timeout values */ -#define SK_I2C_TIM_LONG 2000000L /* 2 seconds */ -#define SK_I2C_TIM_SHORT 100000L /* 100 milliseconds */ +#define SK_I2C_TIM_LONG 2000000L /* 2 seconds */ +#define SK_I2C_TIM_SHORT 100000L /* 100 milliseconds */ +#define SK_I2C_TIM_WATCH 1000000L /* 1 second */ /* * Define trap and error log hold times */ #ifndef SK_SEN_ERR_TR_HOLD -#define SK_SEN_ERR_TR_HOLD (4*SK_TICKS_PER_SEC) +#define SK_SEN_ERR_TR_HOLD (4*SK_TICKS_PER_SEC) #endif #ifndef SK_SEN_ERR_LOG_HOLD -#define SK_SEN_ERR_LOG_HOLD (60*SK_TICKS_PER_SEC) +#define SK_SEN_ERR_LOG_HOLD (60*SK_TICKS_PER_SEC) #endif #ifndef SK_SEN_WARN_TR_HOLD -#define SK_SEN_WARN_TR_HOLD (15*SK_TICKS_PER_SEC) +#define SK_SEN_WARN_TR_HOLD (15*SK_TICKS_PER_SEC) #endif #ifndef SK_SEN_WARN_LOG_HOLD -#define SK_SEN_WARN_LOG_HOLD (15*60*SK_TICKS_PER_SEC) +#define SK_SEN_WARN_LOG_HOLD (15*60*SK_TICKS_PER_SEC) #endif /* * Defines for SenType */ -#define SK_SEN_TEMP 1 -#define SK_SEN_VOLT 2 -#define SK_SEN_FAN 3 +#define SK_SEN_UNKNOWN 0 +#define SK_SEN_TEMP 1 +#define SK_SEN_VOLT 2 +#define SK_SEN_FAN 3 /* - * Define for the ErrorFlag + * Define for the SenErrorFlag */ -#define SK_SEN_ERR_OK 1 /* Error Flag: O.K. */ -#define SK_SEN_ERR_WARN 2 /* Error Flag: Warning */ -#define SK_SEN_ERR_ERR 3 /* Error Flag: Error */ +#define SK_SEN_ERR_NOT_PRESENT 0 /* Error Flag: Sensor not present */ +#define SK_SEN_ERR_OK 1 /* Error Flag: O.K. */ +#define SK_SEN_ERR_WARN 2 /* Error Flag: Warning */ +#define SK_SEN_ERR_ERR 3 /* Error Flag: Error */ +#define SK_SEN_ERR_FAULTY 4 /* Error Flag: Faulty */ /* * Define the Sensor struct */ struct s_Sensor { - char *SenDesc; /* Description */ - int SenType; /* Voltage or Temperature */ - SK_I32 SenValue; /* Current value of the sensor */ - SK_I32 SenThreErrHigh; /* High error Threshhold of this sensor */ - SK_I32 SenThreWarnHigh;/* High warning Threshhold of this sensor */ - SK_I32 SenThreErrLow; /* Lower error Threshold of the sensor */ - SK_I32 SenThreWarnLow; /* Lower warning Threshold of the sensor */ - int SenErrFlag; /* Sensor indicated an error */ - SK_BOOL SenInit; /* Is sensor initialized? 
*/ - SK_U64 SenErrCts; /* Error trap counter */ - SK_U64 SenWarnCts; /* Warning trap counter */ - SK_U64 SenBegErrTS; /* Begin error timestamp */ - SK_U64 SenBegWarnTS; /* Begin warning timestamp */ + char *SenDesc; /* Description */ + int SenType; /* Voltage or Temperature */ + SK_I32 SenValue; /* Current value of the sensor */ + SK_I32 SenThreErrHigh; /* High error Threshhold of this sensor */ + SK_I32 SenThreWarnHigh; /* High warning Threshhold of this sensor */ + SK_I32 SenThreErrLow; /* Lower error Threshold of the sensor */ + SK_I32 SenThreWarnLow; /* Lower warning Threshold of the sensor */ + int SenErrFlag; /* Sensor indicated an error */ + SK_BOOL SenInit; /* Is sensor initialized ? */ + SK_U64 SenErrCts; /* Error trap counter */ + SK_U64 SenWarnCts; /* Warning trap counter */ + SK_U64 SenBegErrTS; /* Begin error timestamp */ + SK_U64 SenBegWarnTS; /* Begin warning timestamp */ SK_U64 SenLastErrTrapTS; /* Last error trap timestamp */ SK_U64 SenLastErrLogTS; /* Last error log timestamp */ SK_U64 SenLastWarnTrapTS; /* Last warning trap timestamp */ SK_U64 SenLastWarnLogTS; /* Last warning log timestamp */ - int SenState; /* Sensor State (see HW specific include) */ - int (*SenRead)(SK_AC *pAC, SK_IOC IoC,struct s_Sensor *pSen) ; - /* Sensors read function */ - SK_U16 SenReg; /* Register Address for this sensor */ - SK_U8 SenDev; /* DeviceSelection for this sensor */ -} ; - + int SenState; /* Sensor State (see HW specific include) */ + int (*SenRead)(SK_AC *pAC, SK_IOC IoC, struct s_Sensor *pSen); + /* Sensors read function */ + SK_U16 SenReg; /* Register Address for this sensor */ + SK_U8 SenDev; /* Device Selection for this sensor */ +}; typedef struct s_I2c { SK_SENSOR SenTable[SK_MAX_SENSORS]; /* Sensor Table */ - int CurrSens; /* Which sensor is currently queried */ - int MaxSens; /* Max. number of sensors */ - int InitLevel; /* Initialized Level */ + int CurrSens; /* Which sensor is currently queried */ + int MaxSens; /* Max. number of sensors */ + int TimerMode; /* Use the timer also to watch the state machine */ + int InitLevel; /* Initialized Level */ #ifndef SK_DIAG - int DummyReads; /* Number of non-checked dummy reads */ + int DummyReads; /* Number of non-checked dummy reads */ SK_TIMER SenTimer; /* Sensors timer */ -#endif /* !SK_DIAG */ +#endif /* !SK_DIAG */ } SK_I2C; extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen); @@ -273,5 +286,5 @@ extern void SkI2cIsr(SK_AC *pAC, SK_IOC IoC); #endif -#endif /* n_SKI2C_H */ +#endif /* n_SKI2C_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skqueue.h linux.21pre4-ac2/drivers/net/sk98lin/h/skqueue.h --- linux.21pre4/drivers/net/sk98lin/h/skqueue.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skqueue.h 2003-01-06 15:38:18.000000000 +0000 @@ -1,32 +1,23 @@ /****************************************************************************** * * Name: skqueue.h - * Project: Genesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.12 $ - * Date: $Date: 1998/09/08 08:48:01 $ + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.14 $ + * Date: $Date: 2002/03/15 10:52:13 $ * Purpose: Defines for the Event queue * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1989-1998 SysKonnect, + * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. 
- * All Rights Reserved * - * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF SYSKONNECT - * The copyright notice above does not evidence any - * actual or intended publication of such source code. - * - * This Module contains Proprietary Information of SysKonnect - * and should be treated as Confidential. - * - * The information in this file is provided for the exclusive use of - * the licensees of SysKonnect. - * Such users have the right to use, modify, and incorporate this code - * into products for purposes authorized by the license agreement - * provided they include this notice and the associated copyright notice - * with any such product. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ @@ -36,6 +27,12 @@ * History: * * $Log: skqueue.h,v $ + * Revision 1.14 2002/03/15 10:52:13 mkunz + * Added event classes for link aggregation + * + * Revision 1.13 1999/11/22 13:59:05 cgoos + * Changed license header to GPL. + * * Revision 1.12 1998/09/08 08:48:01 gklug * add: init level handling * @@ -97,6 +94,12 @@ #define SKGE_CSUM 5 /* Checksum Event Class */ #define SKGE_HWAC 6 /* Hardware Access Event Class */ +#define SKGE_SWT 9 /* Software Timer Event Class */ +#define SKGE_LACP 10 /* LACP Aggregation Event Class */ +#define SKGE_RSF 11 /* RSF Aggregation Event Class */ +#define SKGE_MARKER 12 /* MARKER Aggregation Event Class */ +#define SKGE_FD 13 /* FD Distributor Event Class */ + /* * define event queue as circular buffer */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skrlmt.h linux.21pre4-ac2/drivers/net/sk98lin/h/skrlmt.h --- linux.21pre4/drivers/net/sk98lin/h/skrlmt.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skrlmt.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skrlmt.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.32 $ - * Date: $Date: 2001/02/14 14:06:31 $ + * Version: $Revision: 1.33 $ + * Date: $Date: 2001/07/03 12:16:48 $ * Purpose: Header file for Redundant Link ManagemenT. * ******************************************************************************/ @@ -26,6 +26,9 @@ * History: * * $Log: skrlmt.h,v $ + * Revision 1.33 2001/07/03 12:16:48 mkunz + * New Flag ChgBcPrio (Change priority of last broadcast received) + * * Revision 1.32 2001/02/14 14:06:31 rassmann * Editorial changes. * @@ -468,6 +471,7 @@ /* For PNMI */ + SK_U32 ChgBcPrio; /* Change Priority of last broadcast received */ SK_U32 RlmtMode; /* Check ... */ SK_U32 ActivePort; /* Active port. */ SK_U32 Preference; /* 0xFFFFFFFF: Automatic. 
*/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/sktimer.h linux.21pre4-ac2/drivers/net/sk98lin/h/sktimer.h --- linux.21pre4/drivers/net/sk98lin/h/sktimer.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/sktimer.h 2003-01-06 15:38:18.000000000 +0000 @@ -1,32 +1,23 @@ /****************************************************************************** * * Name: sktimer.h - * Project: Genesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.8 $ - * Date: $Date: 1998/09/08 08:48:02 $ + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.9 $ + * Date: $Date: 1999/11/22 14:00:29 $ * Purpose: Defines for the timer functions * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1989-1998 SysKonnect, + * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. - * All Rights Reserved * - * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF SYSKONNECT - * The copyright notice above does not evidence any - * actual or intended publication of such source code. - * - * This Module contains Proprietary Information of SysKonnect - * and should be treated as Confidential. - * - * The information in this file is provided for the exclusive use of - * the licensees of SysKonnect. - * Such users have the right to use, modify, and incorporate this code - * into products for purposes authorized by the license agreement - * provided they include this notice and the associated copyright notice - * with any such product. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ @@ -36,6 +27,9 @@ * History: * * $Log: sktimer.h,v $ + * Revision 1.9 1999/11/22 14:00:29 cgoos + * Changed license header to GPL. + * * Revision 1.8 1998/09/08 08:48:02 gklug * add: init level handling * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skversion.h linux.21pre4-ac2/drivers/net/sk98lin/h/skversion.h --- linux.21pre4/drivers/net/sk98lin/h/skversion.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skversion.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: version.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.1 $ - * Date: $Date: 2001/03/06 09:25:00 $ + * Version: $Revision: 1.1.2.1 $ + * Date: $Date: 2001/09/05 13:38:30 $ * Purpose: SK specific Error log support * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, + * (C)Copyright 1998-2002 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. 
* * This program is free software; you can redistribute it and/or modify @@ -26,6 +26,9 @@ * * History: * $Log: skversion.h,v $ + * Revision 1.1.2.1 2001/09/05 13:38:30 mlindner + * Removed FILE description + * * Revision 1.1 2001/03/06 09:25:00 mlindner * first version * @@ -34,13 +37,13 @@ ******************************************************************************/ -static const char SysKonnectFileId[] = "@(#)" __FILE__ " (C) SysKonnect GmbH."; +static const char SysKonnectFileId[] = "@(#) (C) SysKonnect GmbH."; static const char SysKonnectBuildNumber[] = - "@(#)SK-BUILD: 4.06 PL: 01"; + "@(#)SK-BUILD: 6.02 PL: 01"; -#define BOOT_STRING "sk98lin: Network Device Driver v4.06\n" \ - "Copyright (C) 2000-2001 SysKonnect GmbH." +#define BOOT_STRING "sk98lin: Network Device Driver v6.02\n" \ + "Copyright (C) 2000-2002 SysKonnect GmbH." -#define VER_STRING "4.06" +#define VER_STRING "6.02" diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/skvpd.h linux.21pre4-ac2/drivers/net/sk98lin/h/skvpd.h --- linux.21pre4/drivers/net/sk98lin/h/skvpd.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/skvpd.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: skvpd.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.9 $ - * Date: $Date: 1999/11/22 14:02:27 $ + * Version: $Revision: 1.13 $ + * Date: $Date: 2002/10/14 15:58:18 $ * Purpose: Defines and Macros for VPD handling * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,25 @@ * History: * * $Log: skvpd.h,v $ + * Revision 1.13 2002/10/14 15:58:18 rschmidt + * Added entry in rom_size struct s_vpd + * Editorial changes + * + * Revision 1.12 2002/09/09 14:43:51 mkarl + * added PCI Id of Yukon for reading VPD in diag before the adapter has + * been initialized + * editorial changes + * + * Revision 1.11 2002/07/26 13:19:16 mkarl + * added support for Yukon + * added vpd_size to VPD struct + * + * Revision 1.10 2000/08/10 11:29:07 rassmann + * Editorial changes. + * Preserving 32-bit alignment in structs for the adapter context. + * Removed unused function VpdWriteDword() (#if 0). + * Made VpdReadKeyword() available for SKDIAG only. + * * Revision 1.9 1999/11/22 14:02:27 cgoos * Changed license header to GPL. * @@ -99,7 +117,12 @@ /* * Define READ and WRITE Constants. 
*/ -#define VPD_SIZE 512 + +#define VPD_PCI_ID_YUKON 0x4320 + +#define VPD_SIZE_YUKON 256 +#define VPD_SIZE_GENESIS 512 +#define VPD_SIZE 512 #define VPD_READ 0x0000 #define VPD_WRITE 0x8000 @@ -115,40 +138,44 @@ /* VPD status */ /* bit 7..1 reserved */ -#define VPD_VALID (1<<0) /* VPD data buffer, vpd_free_ro, */ - /* and vpd_free_rw valid */ +#define VPD_VALID (1<<0) /* VPD data buffer, vpd_free_ro, */ + /* and vpd_free_rw valid */ /* * VPD structs */ typedef struct s_vpd_status { - unsigned short vpd_status ; /* VPD status, description see above */ - int vpd_free_ro ; /* unused bytes in read only area */ - int vpd_free_rw ; /* bytes available in read/write area */ + unsigned short Align01; /* Alignment */ + unsigned short vpd_status; /* VPD status, description see above */ + int vpd_free_ro; /* unused bytes in read only area */ + int vpd_free_rw; /* bytes available in read/write area */ } SK_VPD_STATUS; typedef struct s_vpd { - SK_VPD_STATUS v ; /* VPD status structure */ - char vpd_buf[VPD_SIZE] ; /* VPD buffer */ + SK_VPD_STATUS v; /* VPD status structure */ + char vpd_buf[VPD_SIZE]; /* VPD buffer */ + int rom_size; /* VPD ROM Size from PCI_OUR_REG_2 */ + int vpd_size; /* saved VPD-size */ } SK_VPD; typedef struct s_vpd_para { - unsigned int p_len ; /* parameter length */ - char *p_val ; /* points to the value */ + unsigned int p_len; /* parameter length */ + char *p_val; /* points to the value */ } SK_VPD_PARA; /* * structure of Large Resource Type Identifiers */ -/* was removed, because of alignment problems */ + +/* was removed because of alignment problems */ /* - * sturcture of VPD keywords + * structure of VPD keywords */ typedef struct s_vpd_key { - char p_key[2] ; /* 2 bytes ID string */ - unsigned char p_len ; /* 1 byte length */ - char p_val ; /* start of the value string */ + char p_key[2]; /* 2 bytes ID string */ + unsigned char p_len; /* 1 byte length */ + char p_val; /* start of the value string */ } SK_VPD_KEY; @@ -172,39 +199,39 @@ #define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal) #endif /* VPD_DO_IO */ #else /* SKDIAG */ -#define VPD_OUT8(pAC,Ioc,Addr,Val) { \ +#define VPD_OUT8(pAC,Ioc,Addr,Val) { \ if ((pAC)->DgT.DgUseCfgCycle) \ - SkPciWriteCfgByte(pAC,Addr,Val) ; \ - else \ + SkPciWriteCfgByte(pAC,Addr,Val); \ + else \ SK_OUT8(pAC,PCI_C(Addr),Val); \ } -#define VPD_OUT16(pAC,Ioc,Addr,Val) { \ +#define VPD_OUT16(pAC,Ioc,Addr,Val) { \ if ((pAC)->DgT.DgUseCfgCycle) \ - SkPciWriteCfgWord(pAC,Addr,Val) ; \ + SkPciWriteCfgWord(pAC,Addr,Val); \ else \ SK_OUT16(pAC,PCI_C(Addr),Val); \ } -#define VPD_OUT32(pAC,Ioc,Addr,Val) { \ +#define VPD_OUT32(pAC,Ioc,Addr,Val) { \ if ((pAC)->DgT.DgUseCfgCycle) \ - SkPciWriteCfgDWord(pAC,Addr,Val) ; \ + SkPciWriteCfgDWord(pAC,Addr,Val); \ else \ SK_OUT32(pAC,PCI_C(Addr),Val); \ } -#define VPD_IN8(pAC,Ioc,Addr,pVal) { \ +#define VPD_IN8(pAC,Ioc,Addr,pVal) { \ if ((pAC)->DgT.DgUseCfgCycle) \ - SkPciReadCfgByte(pAC,Addr,pVal) ; \ + SkPciReadCfgByte(pAC,Addr,pVal); \ else \ SK_IN8(pAC,PCI_C(Addr),pVal); \ } -#define VPD_IN16(pAC,Ioc,Addr,pVal) { \ +#define VPD_IN16(pAC,Ioc,Addr,pVal) { \ if ((pAC)->DgT.DgUseCfgCycle) \ - SkPciReadCfgWord(pAC,Addr,pVal) ; \ + SkPciReadCfgWord(pAC,Addr,pVal); \ else \ SK_IN16(pAC,PCI_C(Addr),pVal); \ } -#define VPD_IN32(pAC,Ioc,Addr,pVal) { \ +#define VPD_IN32(pAC,Ioc,Addr,pVal) { \ if ((pAC)->DgT.DgUseCfgCycle) \ - SkPciReadCfgDWord(pAC,Addr,pVal) ; \ + SkPciReadCfgDWord(pAC,Addr,pVal); \ else \ SK_IN32(pAC,PCI_C(Addr),pVal); \ } @@ -213,50 +240,52 @@ /* function prototypes 
********************************************************/ #ifndef SK_KR_PROTO +#ifdef SKDIAG extern SK_U32 VpdReadDWord( SK_AC *pAC, SK_IOC IoC, - int addr) ; + int addr); +#endif /* SKDIAG */ extern int VpdSetupPara( SK_AC *pAC, char *key, char *buf, - int len, - int type, - int op) ; + int len, + int type, + int op); extern SK_VPD_STATUS *VpdStat( SK_AC *pAC, - SK_IOC IoC) ; + SK_IOC IoC); extern int VpdKeys( SK_AC *pAC, SK_IOC IoC, char *buf, - int *len, - int *elements) ; + int *len, + int *elements); extern int VpdRead( SK_AC *pAC, SK_IOC IoC, char *key, char *buf, - int *len) ; + int *len); -extern SK_BOOL VpdMayWrite( - char *key) ; +extern SK_BOOL VpdMayWrite( + char *key); extern int VpdWrite( SK_AC *pAC, SK_IOC IoC, char *key, - char *buf) ; + char *buf); extern int VpdDelete( SK_AC *pAC, SK_IOC IoC, - char *key) ; + char *key); extern int VpdUpdate( SK_AC *pAC, @@ -265,34 +294,34 @@ extern void VpdErrLog( SK_AC *pAC, SK_IOC IoC, - char *msg) ; + char *msg); #ifdef SKDIAG extern int VpdReadBlock( SK_AC *pAC, SK_IOC IoC, char *buf, - int addr, - int len) ; + int addr, + int len); extern int VpdWriteBlock( SK_AC *pAC, SK_IOC IoC, char *buf, - int addr, - int len) ; + int addr, + int len); #endif /* SKDIAG */ #else /* SK_KR_PROTO */ -extern SK_U32 VpdReadDWord() ; -extern int VpdSetupPara() ; -extern SK_VPD_STATUS *VpdStat() ; -extern int VpdKeys() ; -extern int VpdRead() ; -extern SK_BOOL VpdMayWrite() ; -extern int VpdWrite() ; -extern int VpdDelete() ; -extern int VpdUpdate() ; -extern void VpdErrLog() ; +extern SK_U32 VpdReadDWord(); +extern int VpdSetupPara(); +extern SK_VPD_STATUS *VpdStat(); +extern int VpdKeys(); +extern int VpdRead(); +extern SK_BOOL VpdMayWrite(); +extern int VpdWrite(); +extern int VpdDelete(); +extern int VpdUpdate(); +extern void VpdErrLog(); #endif /* SK_KR_PROTO */ #endif /* __INC_SKVPD_H_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/h/xmac_ii.h linux.21pre4-ac2/drivers/net/sk98lin/h/xmac_ii.h --- linux.21pre4/drivers/net/sk98lin/h/xmac_ii.h 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/h/xmac_ii.h 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: xmac_ii.h * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.28 $ - * Date: $Date: 2000/11/09 12:32:49 $ - * Purpose: Defines and Macros for XaQti's Gigabit Ethernet Controller + * Version: $Revision: 1.45 $ + * Date: $Date: 2002/12/10 14:35:13 $ + * Purpose: Defines and Macros for Gigabit Ethernet Controller * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2000 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,77 @@ * History: * * $Log: xmac_ii.h,v $ + * Revision 1.45 2002/12/10 14:35:13 rschmidt + * Corrected defines for Extended PHY Specific Control + * Added defines for Ext. PHY Specific Ctrl 2 Reg. (Fiber specific) + * + * Revision 1.44 2002/12/09 14:58:41 rschmidt + * Added defines for Ext. PHY Specific Ctrl Reg. 
(downshift feature) + * Added 'GMR_FS_UN_SIZE'-Bit to Rx GMAC FIFO Flush Mask + * + * Revision 1.43 2002/12/05 10:14:45 rschmidt + * Added define for GMAC's Half Duplex Burst Mode + * Added define for Rx GMAC FIFO Flush Mask (default) + * + * Revision 1.42 2002/11/12 16:48:19 rschmidt + * Added defines for Cable Diagnostic Register (GPHY) + * Editorial changes + * + * Revision 1.41 2002/10/21 11:20:22 rschmidt + * Added bit GMR_FS_GOOD_FC to GMR_FS_ANY_ERR + * Editorial changes + * + * Revision 1.40 2002/10/14 14:54:14 rschmidt + * Added defines for GPHY Specific Status and GPHY Interrupt Status + * Added bits PHY_M_IS_AN_ERROR and PHY_M_IS_FIFO_ERROR to PHY_M_DEF_MSK + * Editorial changes + * + * Revision 1.39 2002/10/10 15:53:44 mkarl + * added some bit definitions for link speed status and LED's + * + * Revision 1.38 2002/08/21 16:23:46 rschmidt + * Added defines for PHY Specific Ctrl Reg + * Editorial changes + * + * Revision 1.37 2002/08/16 14:50:33 rschmidt + * Added defines for Auto-Neg. Advertisement YUKON Fiber (88E1011S only) + * Changed define PHY_M_DEF_MSK for GPHY IRQ Mask + * Editorial changes + * + * Revision 1.36 2002/08/12 13:21:10 rschmidt + * Added defines for different Broadcom PHY Ids + * + * Revision 1.35 2002/08/08 15:58:01 rschmidt + * Added defines for Manual LED Override register (YUKON) + * Editorial changes + * + * Revision 1.34 2002/07/31 17:23:36 rwahl + * Added define GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR). + * + * Revision 1.33 2002/07/23 16:03:37 rschmidt + * Added defines for GPHY registers + * Editorial changes + * + * Revision 1.32 2002/07/15 18:14:37 rwahl + * Added GMAC MIB counters definitions. + * Editorial changes. + * + * Revision 1.31 2002/07/15 15:42:50 rschmidt + * Removed defines from PHY specific reg. which are + * common to all PHYs + * Added defines for GMAC MIB Counters + * Editorial changes + * + * Revision 1.30 2002/06/05 08:22:12 rschmidt + * Changed defines for GMAC Rx Control Register and Rx Status + * Editorial changes + * + * Revision 1.29 2002/04/25 11:43:56 rschmidt + * Added define PHY_B_AS_PAUSE_MSK for BCom Pause Res. + * Added new registers and defines for YUKON (GMAC, GPHY) + * Added Receive Frame Status Encoding for YUKON + * Editorial changes + * * Revision 1.28 2000/11/09 12:32:49 rassmann * Renamed variables. * @@ -82,7 +153,7 @@ * Changed shifted constant to ULONG. * * Revision 1.11 1998/10/14 05:43:26 gklug - * add: shift pause codeing + * add: shift pause coding * fix: PAUSE bits definition * * Revision 1.10 1998/10/13 09:19:21 malthoff @@ -135,61 +206,61 @@ /* * XMAC II registers * - * The XMAC registers are 16 or 32 bits wide. The XMACs host processor - * interface is set to 16 bit mode, therefore ALL registers will be - * addressed with 16 bit accesses. + * The XMAC registers are 16 or 32 bits wide. + * The XMACs host processor interface is set to 16 bit mode, + * therefore ALL registers will be addressed with 16 bit accesses. * * The following macros are provided to access the XMAC registers - * XM_IN16(), XM_OUT16, XM_IN32(), MX_OUT32(), XM_INADR(), XM_OUTADR(), + * XM_IN16(), XM_OUT16, XM_IN32(), XM_OUT32(), XM_INADR(), XM_OUTADR(), * XM_INHASH(), and XM_OUTHASH(). * The macros are defined in SkGeHw.h. * * Note: NA reg = Network Address e.g DA, SA etc. 
* */ -#define XM_MMU_CMD 0x0000 /* 16 bit r/w MMU Command Register */ +#define XM_MMU_CMD 0x0000 /* 16 bit r/w MMU Command Register */ /* 0x0004: reserved */ -#define XM_POFF 0x0008 /* 32 bit r/w Packet Offset Register */ -#define XM_BURST 0x000c /* 32 bit r/w Burst Register for half duplex*/ +#define XM_POFF 0x0008 /* 32 bit r/w Packet Offset Register */ +#define XM_BURST 0x000c /* 32 bit r/w Burst Register for half duplex*/ #define XM_1L_VLAN_TAG 0x0010 /* 16 bit r/w One Level VLAN Tag ID */ #define XM_2L_VLAN_TAG 0x0014 /* 16 bit r/w Two Level VLAN Tag ID */ /* 0x0018 - 0x001e: reserved */ -#define XM_TX_CMD 0x0020 /* 16 bit r/w Transmit Command Register */ -#define XM_TX_RT_LIM 0x0024 /* 16 bit r/w Transmit Retry Limit Register */ -#define XM_TX_STIME 0x0028 /* 16 bit r/w Transmit Slottime Register */ -#define XM_TX_IPG 0x002c /* 16 bit r/w Transmit Inter Packet Gap */ -#define XM_RX_CMD 0x0030 /* 16 bit r/w Receive Command Register */ -#define XM_PHY_ADDR 0x0034 /* 16 bit r/w PHY Address Register */ -#define XM_PHY_DATA 0x0038 /* 16 bit r/w PHY Data Register */ +#define XM_TX_CMD 0x0020 /* 16 bit r/w Transmit Command Register */ +#define XM_TX_RT_LIM 0x0024 /* 16 bit r/w Transmit Retry Limit Register */ +#define XM_TX_STIME 0x0028 /* 16 bit r/w Transmit Slottime Register */ +#define XM_TX_IPG 0x002c /* 16 bit r/w Transmit Inter Packet Gap */ +#define XM_RX_CMD 0x0030 /* 16 bit r/w Receive Command Register */ +#define XM_PHY_ADDR 0x0034 /* 16 bit r/w PHY Address Register */ +#define XM_PHY_DATA 0x0038 /* 16 bit r/w PHY Data Register */ /* 0x003c: reserved */ -#define XM_GP_PORT 0x0040 /* 32 bit r/w General Purpose Port Register */ -#define XM_IMSK 0x0044 /* 16 bit r/w Interrupt Mask Register */ -#define XM_ISRC 0x0048 /* 16 bit ro Interrupt Status Register */ -#define XM_HW_CFG 0x004c /* 16 bit r/w Hardware Config Register */ +#define XM_GP_PORT 0x0040 /* 32 bit r/w General Purpose Port Register */ +#define XM_IMSK 0x0044 /* 16 bit r/w Interrupt Mask Register */ +#define XM_ISRC 0x0048 /* 16 bit r/o Interrupt Status Register */ +#define XM_HW_CFG 0x004c /* 16 bit r/w Hardware Config Register */ /* 0x0050 - 0x005e: reserved */ -#define XM_TX_LO_WM 0x0060 /* 16 bit r/w Tx FIFO Low Water Mark */ -#define XM_TX_HI_WM 0x0062 /* 16 bit r/w Tx FIFO High Water Mark */ -#define XM_TX_THR 0x0064 /* 16 bit r/w Tx Request Threshold */ -#define XM_HT_THR 0x0066 /* 16 bit r/w Host Request Threshold */ -#define XM_PAUSE_DA 0x0068 /* NA reg r/w Pause Destination Address */ +#define XM_TX_LO_WM 0x0060 /* 16 bit r/w Tx FIFO Low Water Mark */ +#define XM_TX_HI_WM 0x0062 /* 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_THR 0x0064 /* 16 bit r/w Tx Request Threshold */ +#define XM_HT_THR 0x0066 /* 16 bit r/w Host Request Threshold */ +#define XM_PAUSE_DA 0x0068 /* NA reg r/w Pause Destination Address */ /* 0x006e: reserved */ -#define XM_CTL_PARA 0x0070 /* 32 bit r/w Control Parameter Register */ +#define XM_CTL_PARA 0x0070 /* 32 bit r/w Control Parameter Register */ #define XM_MAC_OPCODE 0x0074 /* 16 bit r/w Opcode for MAC control frames */ #define XM_MAC_PTIME 0x0076 /* 16 bit r/w Pause time for MAC ctrl frames*/ -#define XM_TX_STAT 0x0078 /* 32 bit ro Tx Status LIFO Register */ +#define XM_TX_STAT 0x0078 /* 32 bit r/o Tx Status LIFO Register */ - /* 0x0080 - 0x00fc: 16 NA reg r/w Exakt Match Address Registers */ - /* use the XM_EMX() macro to address */ + /* 0x0080 - 0x00fc: 16 NA reg r/w Exact Match Address Registers */ + /* use the XM_EXM() macro to address */ #define XM_EXM_START 0x0080 /* r/w 
Start Address of the EXM Regs */ /* * XM_EXM(Reg) * - * returns the XMAC address offset off specified Exakt Match Addr Reg + * returns the XMAC address offset of specified Exact Match Addr Reg * * para: Reg EXM register to addr (0 .. 15) * - * usage: XM_INADDR(XMAC_1,pAC,XM_EXM(i),&val[i]) ; + * usage: XM_INADDR(IoC, MAC_1, XM_EXM(i), &val[i]); */ #define XM_EXM(Reg) (XM_EXM_START + ((Reg) << 3)) @@ -199,78 +270,78 @@ #define XM_RX_LO_WM 0x0118 /* 16 bit r/w Receive Low Water Mark */ #define XM_RX_HI_WM 0x011a /* 16 bit r/w Receive High Water Mark */ #define XM_RX_THR 0x011c /* 32 bit r/w Receive Request Threshold */ -#define XM_DEV_ID 0x0120 /* 32 bit ro Device ID Register */ +#define XM_DEV_ID 0x0120 /* 32 bit r/o Device ID Register */ #define XM_MODE 0x0124 /* 32 bit r/w Mode Register */ -#define XM_LSA 0x0128 /* NA reg ro Last Source Register */ +#define XM_LSA 0x0128 /* NA reg r/o Last Source Register */ /* 0x012e: reserved */ -#define XM_TS_READ 0x0130 /* 32 bit ro TimeStamp Read Regeister */ -#define XM_TS_LOAD 0x0134 /* 32 bit ro TimeStamp Load Value */ +#define XM_TS_READ 0x0130 /* 32 bit r/o Time Stamp Read Register */ +#define XM_TS_LOAD 0x0134 /* 32 bit r/o Time Stamp Load Value */ /* 0x0138 - 0x01fe: reserved */ #define XM_STAT_CMD 0x0200 /* 16 bit r/w Statistics Command Register */ -#define XM_RX_CNT_EV 0x0204 /* 32 bit ro Rx Counter Event Register */ -#define XM_TX_CNT_EV 0x0208 /* 32 bit ro Tx Counter Event Register */ +#define XM_RX_CNT_EV 0x0204 /* 32 bit r/o Rx Counter Event Register */ +#define XM_TX_CNT_EV 0x0208 /* 32 bit r/o Tx Counter Event Register */ #define XM_RX_EV_MSK 0x020c /* 32 bit r/w Rx Counter Event Mask */ #define XM_TX_EV_MSK 0x0210 /* 32 bit r/w Tx Counter Event Mask */ /* 0x0204 - 0x027e: reserved */ -#define XM_TXF_OK 0x0280 /* 32 bit ro Frames Transmitted OK Conuter */ -#define XM_TXO_OK_HI 0x0284 /* 32 bit ro Octets Transmitted OK High Cnt*/ -#define XM_TXO_OK_LO 0x0288 /* 32 bit ro Octets Transmitted OK Low Cnt */ -#define XM_TXF_BC_OK 0x028c /* 32 bit ro Broadcast Frames Xmitted OK */ -#define XM_TXF_MC_OK 0x0290 /* 32 bit ro Multicast Frames Xmitted OK */ -#define XM_TXF_UC_OK 0x0294 /* 32 bit ro Unicast Frames Xmitted OK */ -#define XM_TXF_LONG 0x0298 /* 32 bit ro Tx Long Frame Counter */ -#define XM_TXE_BURST 0x029c /* 32 bit ro Tx Burst Event Counter */ -#define XM_TXF_MPAUSE 0x02a0 /* 32 bit ro Tx Pause MAC Ctrl Frame Cnt */ -#define XM_TXF_MCTRL 0x02a4 /* 32 bit ro Tx MAC Ctrl Frame Counter */ -#define XM_TXF_SNG_COL 0x02a8 /* 32 bit ro Tx Single Colliosion Counter */ -#define XM_TXF_MUL_COL 0x02ac /* 32 bit ro Tx Multiple Collision Counter */ -#define XM_TXF_ABO_COL 0x02b0 /* 32 bit ro Tx aborted due to Exessive Col*/ -#define XM_TXF_LAT_COL 0x02b4 /* 32 bit ro Tx Late Collision Counter */ -#define XM_TXF_DEF 0x02b8 /* 32 bit ro Tx Deferred Frame Counter */ -#define XM_TXF_EX_DEF 0x02bc /* 32 bit ro Tx Excessive Deferall Counter */ -#define XM_TXE_FIFO_UR 0x02c0 /* 32 bit ro Tx FIFO Underrun Event Cnt */ -#define XM_TXE_CS_ERR 0x02c4 /* 32 bit ro Tx Carrier Sence Error Cnt */ -#define XM_TXP_UTIL 0x02c8 /* 32 bit ro Tx Utilization in % */ +#define XM_TXF_OK 0x0280 /* 32 bit r/o Frames Transmitted OK Conuter */ +#define XM_TXO_OK_HI 0x0284 /* 32 bit r/o Octets Transmitted OK High Cnt*/ +#define XM_TXO_OK_LO 0x0288 /* 32 bit r/o Octets Transmitted OK Low Cnt */ +#define XM_TXF_BC_OK 0x028c /* 32 bit r/o Broadcast Frames Xmitted OK */ +#define XM_TXF_MC_OK 0x0290 /* 32 bit r/o Multicast Frames Xmitted OK */ +#define XM_TXF_UC_OK 0x0294 /* 32 
bit r/o Unicast Frames Xmitted OK */ +#define XM_TXF_LONG 0x0298 /* 32 bit r/o Tx Long Frame Counter */ +#define XM_TXE_BURST 0x029c /* 32 bit r/o Tx Burst Event Counter */ +#define XM_TXF_MPAUSE 0x02a0 /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ +#define XM_TXF_MCTRL 0x02a4 /* 32 bit r/o Tx MAC Ctrl Frame Counter */ +#define XM_TXF_SNG_COL 0x02a8 /* 32 bit r/o Tx Single Collision Counter */ +#define XM_TXF_MUL_COL 0x02ac /* 32 bit r/o Tx Multiple Collision Counter */ +#define XM_TXF_ABO_COL 0x02b0 /* 32 bit r/o Tx aborted due to Exces. Col. */ +#define XM_TXF_LAT_COL 0x02b4 /* 32 bit r/o Tx Late Collision Counter */ +#define XM_TXF_DEF 0x02b8 /* 32 bit r/o Tx Deferred Frame Counter */ +#define XM_TXF_EX_DEF 0x02bc /* 32 bit r/o Tx Excessive Deferall Counter */ +#define XM_TXE_FIFO_UR 0x02c0 /* 32 bit r/o Tx FIFO Underrun Event Cnt */ +#define XM_TXE_CS_ERR 0x02c4 /* 32 bit r/o Tx Carrier Sense Error Cnt */ +#define XM_TXP_UTIL 0x02c8 /* 32 bit r/o Tx Utilization in % */ /* 0x02cc - 0x02ce: reserved */ -#define XM_TXF_64B 0x02d0 /* 32 bit ro 64 Byte Tx Frame Counter */ -#define XM_TXF_127B 0x02d4 /* 32 bit ro 65-127 Byte Tx Frame Counter */ -#define XM_TXF_255B 0x02d8 /* 32 bit ro 128-255 Byte Tx Frame Counter */ -#define XM_TXF_511B 0x02dc /* 32 bit ro 256-511 Byte Tx Frame Counter */ -#define XM_TXF_1023B 0x02e0 /* 32 bit ro 512-1023 Byte Tx Frame Counter*/ -#define XM_TXF_MAX_SZ 0x02e4 /* 32 bit ro 1024-MaxSize Byte Tx Frame Cnt*/ +#define XM_TXF_64B 0x02d0 /* 32 bit r/o 64 Byte Tx Frame Counter */ +#define XM_TXF_127B 0x02d4 /* 32 bit r/o 65-127 Byte Tx Frame Counter */ +#define XM_TXF_255B 0x02d8 /* 32 bit r/o 128-255 Byte Tx Frame Counter */ +#define XM_TXF_511B 0x02dc /* 32 bit r/o 256-511 Byte Tx Frame Counter */ +#define XM_TXF_1023B 0x02e0 /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ +#define XM_TXF_MAX_SZ 0x02e4 /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ /* 0x02e8 - 0x02fe: reserved */ -#define XM_RXF_OK 0x0300 /* 32 bit ro Frames Received OK */ -#define XM_RXO_OK_HI 0x0304 /* 32 bit ro Octets Received OK High Cnt */ -#define XM_RXO_OK_LO 0x0308 /* 32 bit ro Octets Received OK Low Counter*/ -#define XM_RXF_BC_OK 0x030c /* 32 bit ro Broadcast Frames Received OK */ -#define XM_RXF_MC_OK 0x0310 /* 32 bit ro Multicast Frames Received OK */ -#define XM_RXF_UC_OK 0x0314 /* 32 bit ro Unicast Frames Received OK */ -#define XM_RXF_MPAUSE 0x0318 /* 32 bit ro Rx Pause MAC Ctrl Frame Cnt */ -#define XM_RXF_MCTRL 0x031c /* 32 bit ro Rx MAC Ctrl Frame Counter */ -#define XM_RXF_INV_MP 0x0320 /* 32 bit ro Rx invalid Pause Frame Cnt */ -#define XM_RXF_INV_MOC 0x0324 /* 32 bit ro Rx Frames with inv. MAC Opcode*/ -#define XM_RXE_BURST 0x0328 /* 32 bit ro Rx Burst Event Counter */ -#define XM_RXE_FMISS 0x032c /* 32 bit ro Rx Missed Frames Event Cnt */ -#define XM_RXF_FRA_ERR 0x0330 /* 32 bit ro Rx Framing Error Counter */ -#define XM_RXE_FIFO_OV 0x0334 /* 32 bit ro Rx FIFO overflow Event Cnt */ -#define XM_RXF_JAB_PKT 0x0338 /* 32 bit ro Rx Jabber Packet Frame Cnt */ -#define XM_RXE_CAR_ERR 0x033c /* 32 bit ro Rx Carrier Event Error Cnt */ -#define XM_RXF_LEN_ERR 0x0340 /* 32 bit ro Rx in Range Length Error */ -#define XM_RXE_SYM_ERR 0x0344 /* 32 bit ro Rx Symbol Error Counter */ -#define XM_RXE_SHT_ERR 0x0348 /* 32 bit ro Rx Short Event Error Cnt */ -#define XM_RXE_RUNT 0x034c /* 32 bit ro Rx Runt Event Counter */ -#define XM_RXF_LNG_ERR 0x0350 /* 32 bit ro Rx Frame too Long Error Cnt */ -#define XM_RXF_FCS_ERR 0x0354 /* 32 bit ro Rx Frame Check Seq. 
Error Cnt */ +#define XM_RXF_OK 0x0300 /* 32 bit r/o Frames Received OK */ +#define XM_RXO_OK_HI 0x0304 /* 32 bit r/o Octets Received OK High Cnt */ +#define XM_RXO_OK_LO 0x0308 /* 32 bit r/o Octets Received OK Low Counter*/ +#define XM_RXF_BC_OK 0x030c /* 32 bit r/o Broadcast Frames Received OK */ +#define XM_RXF_MC_OK 0x0310 /* 32 bit r/o Multicast Frames Received OK */ +#define XM_RXF_UC_OK 0x0314 /* 32 bit r/o Unicast Frames Received OK */ +#define XM_RXF_MPAUSE 0x0318 /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ +#define XM_RXF_MCTRL 0x031c /* 32 bit r/o Rx MAC Ctrl Frame Counter */ +#define XM_RXF_INV_MP 0x0320 /* 32 bit r/o Rx invalid Pause Frame Cnt */ +#define XM_RXF_INV_MOC 0x0324 /* 32 bit r/o Rx Frames with inv. MAC Opcode*/ +#define XM_RXE_BURST 0x0328 /* 32 bit r/o Rx Burst Event Counter */ +#define XM_RXE_FMISS 0x032c /* 32 bit r/o Rx Missed Frames Event Cnt */ +#define XM_RXF_FRA_ERR 0x0330 /* 32 bit r/o Rx Framing Error Counter */ +#define XM_RXE_FIFO_OV 0x0334 /* 32 bit r/o Rx FIFO overflow Event Cnt */ +#define XM_RXF_JAB_PKT 0x0338 /* 32 bit r/o Rx Jabber Packet Frame Cnt */ +#define XM_RXE_CAR_ERR 0x033c /* 32 bit r/o Rx Carrier Event Error Cnt */ +#define XM_RXF_LEN_ERR 0x0340 /* 32 bit r/o Rx in Range Length Error */ +#define XM_RXE_SYM_ERR 0x0344 /* 32 bit r/o Rx Symbol Error Counter */ +#define XM_RXE_SHT_ERR 0x0348 /* 32 bit r/o Rx Short Event Error Cnt */ +#define XM_RXE_RUNT 0x034c /* 32 bit r/o Rx Runt Event Counter */ +#define XM_RXF_LNG_ERR 0x0350 /* 32 bit r/o Rx Frame too Long Error Cnt */ +#define XM_RXF_FCS_ERR 0x0354 /* 32 bit r/o Rx Frame Check Seq. Error Cnt */ /* 0x0358 - 0x035a: reserved */ -#define XM_RXF_CEX_ERR 0x035c /* 32 bit ro Rx Carrier Ext Error Frame Cnt*/ -#define XM_RXP_UTIL 0x0360 /* 32 bit ro Rx Utilization in % */ +#define XM_RXF_CEX_ERR 0x035c /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ +#define XM_RXP_UTIL 0x0360 /* 32 bit r/o Rx Utilization in % */ /* 0x0364 - 0x0366: reserved */ -#define XM_RXF_64B 0x0368 /* 32 bit ro 64 Byte Rx Frame Counter */ -#define XM_RXF_127B 0x036c /* 32 bit ro 65-127 Byte Rx Frame Counter */ -#define XM_RXF_255B 0x0370 /* 32 bit ro 128-255 Byte Rx Frame Counter */ -#define XM_RXF_511B 0x0374 /* 32 bit ro 256-511 Byte Rx Frame Counter */ -#define XM_RXF_1023B 0x0378 /* 32 bit ro 512-1023 Byte Rx Frame Counter*/ -#define XM_RXF_MAX_SZ 0x037c /* 32 bit ro 1024-MaxSize Byte Rx Frame Cnt*/ +#define XM_RXF_64B 0x0368 /* 32 bit r/o 64 Byte Rx Frame Counter */ +#define XM_RXF_127B 0x036c /* 32 bit r/o 65-127 Byte Rx Frame Counter */ +#define XM_RXF_255B 0x0370 /* 32 bit r/o 128-255 Byte Rx Frame Counter */ +#define XM_RXF_511B 0x0374 /* 32 bit r/o 256-511 Byte Rx Frame Counter */ +#define XM_RXF_1023B 0x0378 /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ +#define XM_RXF_MAX_SZ 0x037c /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ /* 0x02e8 - 0x02fe: reserved */ @@ -279,104 +350,104 @@ * XMAC Bit Definitions * * If the bit access behaviour differs from the register access behaviour - * (r/w, ro) this is docomented after the bit number. The following bit - * access behaviours are used: + * (r/w, r/o) this is documented after the bit number. 
+ * The following bit access behaviours are used: * (sc) self clearing * (ro) read only */ -/* XM_MMU_CMD 16 bit r/w MMU Comamnd Register */ +/* XM_MMU_CMD 16 bit r/w MMU Command Register */ /* Bit 15..13: reserved */ #define XM_MMU_PHY_RDY (1<<12) /* Bit 12: PHY Read Ready */ #define XM_MMU_PHY_BUSY (1<<11) /* Bit 11: PHY Busy */ #define XM_MMU_IGN_PF (1<<10) /* Bit 10: Ignore Pause Frame */ -#define XM_MMU_MAC_LB (1<<9) /* Bit 9: Enable MAC Loopback */ - /* Bit 8: reserved */ -#define XM_MMU_FRC_COL (1<<7) /* Bit 7: Force Collision */ -#define XM_MMU_SIM_COL (1<<6) /* Bit 6: Simulate Collision */ -#define XM_MMU_NO_PRE (1<<5) /* Bit 5: No MDIO Preamble */ -#define XM_MMU_GMII_FD (1<<4) /* Bit 4: GMII uses Full Duplex */ -#define XM_MMU_RAT_CTRL (1<<3) /* Bit 3: Enable Rate Control */ -#define XM_MMU_GMII_LOOP (1<<2) /* Bit 2: PHY is in Lookback Mode */ -#define XM_MMU_ENA_RX (1<<1) /* Bit 1: Enable Receiver */ -#define XM_MMU_ENA_TX (1<<0) /* Bit 0: Enable Transmitter */ +#define XM_MMU_MAC_LB (1<<9) /* Bit 9: Enable MAC Loopback */ + /* Bit 8: reserved */ +#define XM_MMU_FRC_COL (1<<7) /* Bit 7: Force Collision */ +#define XM_MMU_SIM_COL (1<<6) /* Bit 6: Simulate Collision */ +#define XM_MMU_NO_PRE (1<<5) /* Bit 5: No MDIO Preamble */ +#define XM_MMU_GMII_FD (1<<4) /* Bit 4: GMII uses Full Duplex */ +#define XM_MMU_RAT_CTRL (1<<3) /* Bit 3: Enable Rate Control */ +#define XM_MMU_GMII_LOOP (1<<2) /* Bit 2: PHY is in Loopback Mode */ +#define XM_MMU_ENA_RX (1<<1) /* Bit 1: Enable Receiver */ +#define XM_MMU_ENA_TX (1<<0) /* Bit 0: Enable Transmitter */ /* XM_TX_CMD 16 bit r/w Transmit Command Register */ /* Bit 15..7: reserved */ -#define XM_TX_BK2BK (1<<6) /* Bit 6: Ignor Carrier Sense (tx Bk2Bk)*/ -#define XM_TX_ENC_BYP (1<<5) /* Bit 5: Set Encoder in Bypass Mode */ -#define XM_TX_SAM_LINE (1<<4) /* Bit 4: (sc) Start utilization calculation */ -#define XM_TX_NO_GIG_MD (1<<3) /* Bit 3: Disable Carrier Extension */ -#define XM_TX_NO_PRE (1<<2) /* Bit 2: Disable Preamble Generation */ -#define XM_TX_NO_CRC (1<<1) /* Bit 1: Disable CRC Generation */ -#define XM_TX_AUTO_PAD (1<<0) /* Bit 0: Enable Automatic Padding */ +#define XM_TX_BK2BK (1<<6) /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ +#define XM_TX_ENC_BYP (1<<5) /* Bit 5: Set Encoder in Bypass Mode */ +#define XM_TX_SAM_LINE (1<<4) /* Bit 4: (sc) Start utilization calculation */ +#define XM_TX_NO_GIG_MD (1<<3) /* Bit 3: Disable Carrier Extension */ +#define XM_TX_NO_PRE (1<<2) /* Bit 2: Disable Preamble Generation */ +#define XM_TX_NO_CRC (1<<1) /* Bit 1: Disable CRC Generation */ +#define XM_TX_AUTO_PAD (1<<0) /* Bit 0: Enable Automatic Padding */ /* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ /* Bit 15..5: reserved */ -#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ +#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ /* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ /* Bit 15..7: reserved */ -#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ +#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ /* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ /* Bit 15..8: reserved */ -#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ +#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ /* XM_RX_CMD 16 bit r/w Receive Command Register */ /* Bit 15..9: reserved */ -#define XM_RX_LENERR_OK (1<<8) /* Bit 8 don't set Rx Err bit for */ +#define XM_RX_LENERR_OK (1<<8) /* Bit 8 don't set Rx Err bit for */ /* inrange error packets */ -#define XM_RX_BIG_PK_OK (1<<7) /* Bit 7 don't set Rx Err bit 
for */ +#define XM_RX_BIG_PK_OK (1<<7) /* Bit 7 don't set Rx Err bit for */ /* jumbo packets */ -#define XM_RX_IPG_CAP (1<<6) /* Bit 6 repl. type field with IPG */ -#define XM_RX_TP_MD (1<<5) /* Bit 5: Enable transparent Mode */ -#define XM_RX_STRIP_FCS (1<<4) /* Bit 4: Enable FCS Stripping */ -#define XM_RX_SELF_RX (1<<3) /* Bit 3: Enable Rx of own packets */ -#define XM_RX_SAM_LINE (1<<2) /* Bit 2: (sc) Start utilization calculation */ -#define XM_RX_STRIP_PAD (1<<1) /* Bit 1: Strip pad bytes of rx frames */ -#define XM_RX_DIS_CEXT (1<<0) /* Bit 0: Disable carrier ext. check */ +#define XM_RX_IPG_CAP (1<<6) /* Bit 6 repl. type field with IPG */ +#define XM_RX_TP_MD (1<<5) /* Bit 5: Enable transparent Mode */ +#define XM_RX_STRIP_FCS (1<<4) /* Bit 4: Enable FCS Stripping */ +#define XM_RX_SELF_RX (1<<3) /* Bit 3: Enable Rx of own packets */ +#define XM_RX_SAM_LINE (1<<2) /* Bit 2: (sc) Start utilization calculation */ +#define XM_RX_STRIP_PAD (1<<1) /* Bit 1: Strip pad bytes of Rx frames */ +#define XM_RX_DIS_CEXT (1<<0) /* Bit 0: Disable carrier ext. check */ /* XM_PHY_ADDR 16 bit r/w PHY Address Register */ /* Bit 15..5: reserved */ -#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */ +#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */ /* XM_GP_PORT 32 bit r/w General Purpose Port Register */ /* Bit 31..7: reserved */ -#define XM_GP_ANIP (1L<<6) /* Bit 6: (ro) Auto Negotiation in Progress */ -#define XM_GP_FRC_INT (1L<<5) /* Bit 5: (sc) Force Interrupt */ - /* Bit 4: reserved */ -#define XM_GP_RES_MAC (1L<<3) /* Bit 3: (sc) Reset MAC and FIFOs */ -#define XM_GP_RES_STAT (1L<<2) /* Bit 2: (sc) Reset the statistics module */ - /* Bit 1: reserved */ -#define XM_GP_INP_ASS (1L<<0) /* Bit 0: (ro) GP Input Pin asserted */ +#define XM_GP_ANIP (1L<<6) /* Bit 6: (ro) Auto-Neg. in progress */ +#define XM_GP_FRC_INT (1L<<5) /* Bit 5: (sc) Force Interrupt */ + /* Bit 4: reserved */ +#define XM_GP_RES_MAC (1L<<3) /* Bit 3: (sc) Reset MAC and FIFOs */ +#define XM_GP_RES_STAT (1L<<2) /* Bit 2: (sc) Reset the statistics module */ + /* Bit 1: reserved */ +#define XM_GP_INP_ASS (1L<<0) /* Bit 0: (ro) GP Input Pin asserted */ /* XM_IMSK 16 bit r/w Interrupt Mask Register */ -/* XM_ISRC 16 bit ro Interrupt Status Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ /* Bit 15: reserved */ #define XM_IS_LNK_AE (1<<14) /* Bit 14: Link Asynchronous Event */ #define XM_IS_TX_ABORT (1<<13) /* Bit 13: Transmit Abort, late Col. 
etc */ #define XM_IS_FRC_INT (1<<12) /* Bit 12: Force INT bit set in GP */ #define XM_IS_INP_ASS (1<<11) /* Bit 11: Input Asserted, GP bit 0 set */ #define XM_IS_LIPA_RC (1<<10) /* Bit 10: Link Partner requests config */ -#define XM_IS_RX_PAGE (1<<9) /* Bit 9: Page Received */ -#define XM_IS_TX_PAGE (1<<8) /* Bit 8: Next Page Loaded for Transmit */ -#define XM_IS_AND (1<<7) /* Bit 7: Auto Negotiation Done */ -#define XM_IS_TSC_OV (1<<6) /* Bit 6: Time Stamp Counter Overflow */ -#define XM_IS_RXC_OV (1<<5) /* Bit 5: Rx Counter Event Overflow */ -#define XM_IS_TXC_OV (1<<4) /* Bit 4: Tx Counter Event Overflow */ -#define XM_IS_RXF_OV (1<<3) /* Bit 3: Receive FIFO Overflow */ -#define XM_IS_TXF_UR (1<<2) /* Bit 2: Transmit FIFO Underrun */ -#define XM_IS_TX_COMP (1<<1) /* Bit 1: Frame Tx Complete */ -#define XM_IS_RX_COMP (1<<0) /* Bit 0: Frame Rx Complete */ +#define XM_IS_RX_PAGE (1<<9) /* Bit 9: Page Received */ +#define XM_IS_TX_PAGE (1<<8) /* Bit 8: Next Page Loaded for Transmit */ +#define XM_IS_AND (1<<7) /* Bit 7: Auto-Negotiation Done */ +#define XM_IS_TSC_OV (1<<6) /* Bit 6: Time Stamp Counter Overflow */ +#define XM_IS_RXC_OV (1<<5) /* Bit 5: Rx Counter Event Overflow */ +#define XM_IS_TXC_OV (1<<4) /* Bit 4: Tx Counter Event Overflow */ +#define XM_IS_RXF_OV (1<<3) /* Bit 3: Receive FIFO Overflow */ +#define XM_IS_TXF_UR (1<<2) /* Bit 2: Transmit FIFO Underrun */ +#define XM_IS_TX_COMP (1<<1) /* Bit 1: Frame Tx Complete */ +#define XM_IS_RX_COMP (1<<0) /* Bit 0: Frame Rx Complete */ #define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE |\ XM_IS_AND | XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_TXF_UR)) @@ -384,50 +455,50 @@ /* XM_HW_CFG 16 bit r/w Hardware Config Register */ /* Bit 15.. 4: reserved */ -#define XM_HW_GEN_EOP (1<<3) /* Bit 3: generate End of Packet pulse */ -#define XM_HW_COM4SIG (1<<2) /* Bit 2: use Comma Detect for Sig. Det.*/ - /* Bit 1: reserved */ -#define XM_HW_GMII_MD (1<<0) /* Bit 0: GMII Interface selected */ +#define XM_HW_GEN_EOP (1<<3) /* Bit 3: generate End of Packet pulse */ +#define XM_HW_COM4SIG (1<<2) /* Bit 2: use Comma Detect for Sig. Det.*/ + /* Bit 1: reserved */ +#define XM_HW_GMII_MD (1<<0) /* Bit 0: GMII Interface selected */ /* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ /* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ /* Bit 15..10 reserved */ -#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ /* XM_TX_THR 16 bit r/w Tx Request Threshold */ /* XM_HT_THR 16 bit r/w Host Request Threshold */ -/* XM_RX_THR 16 bit r/w Receive Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ /* Bit 15..11 reserved */ -#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Tx FIFO Watermark bits */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ -/* XM_TX_STAT 32 bit ro Tx Status LIFO Register */ +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ #define XM_ST_VALID (1UL<<31) /* Bit 31: Status Valid */ #define XM_ST_BYTE_CNT (0x3fffL<<17) /* Bit 30..17: Tx frame Length */ #define XM_ST_RETRY_CNT (0x1fL<<12) /* Bit 16..12: Retry Count */ #define XM_ST_EX_COL (1L<<11) /* Bit 11: Excessive Collisions */ #define XM_ST_EX_DEF (1L<<10) /* Bit 10: Excessive Deferral */ -#define XM_ST_BURST (1L<<9) /* Bit 9: p. 
xmitted in burst md*/ -#define XM_ST_DEFER (1L<<8) /* Bit 8: packet was defered */ -#define XM_ST_BC (1L<<7) /* Bit 7: Broadcast packet */ -#define XM_ST_MC (1L<<6) /* Bit 6: Multicast packet */ -#define XM_ST_UC (1L<<5) /* Bit 5: Unicast packet */ -#define XM_ST_TX_UR (1L<<4) /* Bit 4: FIFO Underrun occured */ -#define XM_ST_CS_ERR (1L<<3) /* Bit 3: Carrier Sense Error */ -#define XM_ST_LAT_COL (1L<<2) /* Bit 2: Late Collision Error */ -#define XM_ST_MUL_COL (1L<<1) /* Bit 1: Multiple Collisions */ -#define XM_ST_SGN_COL (1L<<0) /* Bit 0: Single Collision */ +#define XM_ST_BURST (1L<<9) /* Bit 9: p. xmitted in burst md*/ +#define XM_ST_DEFER (1L<<8) /* Bit 8: packet was defered */ +#define XM_ST_BC (1L<<7) /* Bit 7: Broadcast packet */ +#define XM_ST_MC (1L<<6) /* Bit 6: Multicast packet */ +#define XM_ST_UC (1L<<5) /* Bit 5: Unicast packet */ +#define XM_ST_TX_UR (1L<<4) /* Bit 4: FIFO Underrun occured */ +#define XM_ST_CS_ERR (1L<<3) /* Bit 3: Carrier Sense Error */ +#define XM_ST_LAT_COL (1L<<2) /* Bit 2: Late Collision Error */ +#define XM_ST_MUL_COL (1L<<1) /* Bit 1: Multiple Collisions */ +#define XM_ST_SGN_COL (1L<<0) /* Bit 0: Single Collision */ /* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ /* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ /* Bit 15..11: reserved */ -#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ +#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ -/* XM_DEV_ID 32 bit ro Device ID Register */ -#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ -#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ +/* XM_DEV_ID 32 bit r/o Device ID Register */ +#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ +#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ /* XM_MODE 32 bit r/w Mode Register */ @@ -435,10 +506,10 @@ #define XM_MD_ENA_REJ (1L<<26) /* Bit 26: Enable Frame Reject */ #define XM_MD_SPOE_E (1L<<25) /* Bit 25: Send Pause on Edge */ /* extern generated */ -#define XM_MD_TX_REP (1L<<24) /* Bit 24: Transmit Repeater Mode*/ -#define XM_MD_SPOFF_I (1L<<23) /* Bit 23: Send Pause on FIFOfull*/ +#define XM_MD_TX_REP (1L<<24) /* Bit 24: Transmit Repeater Mode */ +#define XM_MD_SPOFF_I (1L<<23) /* Bit 23: Send Pause on FIFO full */ /* intern generated */ -#define XM_MD_LE_STW (1L<<22) /* Bit 22: Rx Stat Word in Lit En*/ +#define XM_MD_LE_STW (1L<<22) /* Bit 22: Rx Stat Word in Little Endian */ #define XM_MD_TX_CONT (1L<<21) /* Bit 21: Send Continuous */ #define XM_MD_TX_PAUSE (1L<<20) /* Bit 20: (sc) Send Pause Frame */ #define XM_MD_ATS (1L<<19) /* Bit 19: Append Time Stamp */ @@ -447,40 +518,40 @@ #define XM_MD_SPOH_I (1L<<17) /* Bit 17: Send Pause on High */ /* intern generated */ #define XM_MD_CAP (1L<<16) /* Bit 16: Check Address Pair */ -#define XM_MD_ENA_HSH (1L<<15) /* Bit 15: Enable Hashing */ +#define XM_MD_ENA_HASH (1L<<15) /* Bit 15: Enable Hashing */ #define XM_MD_CSA (1L<<14) /* Bit 14: Check Station Address */ #define XM_MD_CAA (1L<<13) /* Bit 13: Check Address Array */ -#define XM_MD_RX_MCTRL (1L<<12) /* Bit 12: Rx MAC Control Frames */ +#define XM_MD_RX_MCTRL (1L<<12) /* Bit 12: Rx MAC Control Frame */ #define XM_MD_RX_RUNT (1L<<11) /* Bit 11: Rx Runt Frames */ -#define XM_MD_RX_IRLE (1L<<10) /* Bit 10: Rx in Range Len Err F */ -#define XM_MD_RX_LONG (1L<<9) /* Bit 9: Rx Long Frames */ -#define XM_MD_RX_CRCE (1L<<8) /* Bit 8: Rx CRC Error Frames */ -#define XM_MD_RX_ERR (1L<<7) /* Bit 7: Rx Error Frames */ -#define XM_MD_DIS_UC (1L<<6) /* Bit 6: Disable Rx Unicast 
*/ -#define XM_MD_DIS_MC (1L<<5) /* Bit 5: Disable Rx Multicast */ -#define XM_MD_DIS_BC (1L<<4) /* Bit 4: Disable Rx Boradcast */ -#define XM_MD_ENA_PROM (1L<<3) /* Bit 3: Enable Promiscuous */ -#define XM_MD_ENA_BE (1L<<2) /* Bit 2: Enable Big Endian */ -#define XM_MD_FTF (1L<<1) /* Bit 1: (sc) Flush Tx FIFO */ -#define XM_MD_FRF (1L<<0) /* Bit 0: (sc) Flush Rx FIFO */ - -#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) -#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ - XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) +#define XM_MD_RX_IRLE (1L<<10) /* Bit 10: Rx in Range Len Err Frame */ +#define XM_MD_RX_LONG (1L<<9) /* Bit 9: Rx Long Frame */ +#define XM_MD_RX_CRCE (1L<<8) /* Bit 8: Rx CRC Error Frame */ +#define XM_MD_RX_ERR (1L<<7) /* Bit 7: Rx Error Frame */ +#define XM_MD_DIS_UC (1L<<6) /* Bit 6: Disable Rx Unicast */ +#define XM_MD_DIS_MC (1L<<5) /* Bit 5: Disable Rx Multicast */ +#define XM_MD_DIS_BC (1L<<4) /* Bit 4: Disable Rx Broadcast */ +#define XM_MD_ENA_PROM (1L<<3) /* Bit 3: Enable Promiscuous */ +#define XM_MD_ENA_BE (1L<<2) /* Bit 2: Enable Big Endian */ +#define XM_MD_FTF (1L<<1) /* Bit 1: (sc) Flush Tx FIFO */ +#define XM_MD_FRF (1L<<0) /* Bit 0: (sc) Flush Rx FIFO */ + +#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) /* XM_STAT_CMD 16 bit r/w Statistics Command Register */ /* Bit 16..6: reserved */ -#define XM_SC_SNP_RXC (1<<5) /* Bit 5: (sc) Snap Rx Counters */ -#define XM_SC_SNP_TXC (1<<4) /* Bit 4: (sc) Snap Tx Counters */ -#define XM_SC_CP_RXC (1<<3) /* Bit 3: Copy Rx Counters Continuously */ -#define XM_SC_CP_TXC (1<<2) /* Bit 2: Copy Tx Counters Continuously */ -#define XM_SC_CLR_RXC (1<<1) /* Bit 1: (sc) Clear Rx Counters */ -#define XM_SC_CLR_TXC (1<<0) /* Bit 0: (sc) Clear Tx Counters */ +#define XM_SC_SNP_RXC (1<<5) /* Bit 5: (sc) Snap Rx Counters */ +#define XM_SC_SNP_TXC (1<<4) /* Bit 4: (sc) Snap Tx Counters */ +#define XM_SC_CP_RXC (1<<3) /* Bit 3: Copy Rx Counters Continuously */ +#define XM_SC_CP_TXC (1<<2) /* Bit 2: Copy Tx Counters Continuously */ +#define XM_SC_CLR_RXC (1<<1) /* Bit 1: (sc) Clear Rx Counters */ +#define XM_SC_CLR_TXC (1<<0) /* Bit 0: (sc) Clear Tx Counters */ -/* XM_RX_CNT_EV 32 bit ro Rx Counter Event Register */ +/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ /* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ -#define XMR_MAX_SZ_OV (1UL<<31) /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ +#define XMR_MAX_SZ_OV (1UL<<31) /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ #define XMR_1023B_OV (1L<<30) /* Bit 30: 512-1023Byte Rx Cnt Ov*/ #define XMR_511B_OV (1L<<29) /* Bit 29: 256-511 Byte Rx Cnt Ov*/ #define XMR_255B_OV (1L<<28) /* Bit 28: 128-255 Byte Rx Cnt Ov*/ @@ -502,20 +573,20 @@ #define XMR_FRA_ERR_OV (1L<<12) /* Bit 12: Rx Framing Err Cnt Ov */ #define XMR_FMISS_OV (1L<<11) /* Bit 11: Rx Missed Ev Cnt Ov */ #define XMR_BURST (1L<<10) /* Bit 10: Rx Burst Event Cnt Ov */ -#define XMR_INV_MOC (1L<<9) /* Bit 9: Rx with inv. 
MAC OC Ov*/ -#define XMR_INV_MP (1L<<8) /* Bit 8: Rx inv Pause Frame Ov */ -#define XMR_MCTRL_OV (1L<<7) /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ -#define XMR_MPAUSE_OV (1L<<6) /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ -#define XMR_UC_OK_OV (1L<<5) /* Bit 5: Rx Unicast Frame CntOv*/ -#define XMR_MC_OK_OV (1L<<4) /* Bit 4: Rx Multicast Cnt Ov */ -#define XMR_BC_OK_OV (1L<<3) /* Bit 3: Rx Broadcast Cnt Ov */ -#define XMR_OK_LO_OV (1L<<2) /* Bit 2: Octets Rx OK Low CntOv*/ -#define XMR_OK_HI_OV (1L<<1) /* Bit 1: Octets Rx OK Hi Cnt Ov*/ -#define XMR_OK_OV (1L<<0) /* Bit 0: Frames Received Ok Ov */ +#define XMR_INV_MOC (1L<<9) /* Bit 9: Rx with inv. MAC OC Ov*/ +#define XMR_INV_MP (1L<<8) /* Bit 8: Rx inv Pause Frame Ov */ +#define XMR_MCTRL_OV (1L<<7) /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ +#define XMR_MPAUSE_OV (1L<<6) /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ +#define XMR_UC_OK_OV (1L<<5) /* Bit 5: Rx Unicast Frame CntOv*/ +#define XMR_MC_OK_OV (1L<<4) /* Bit 4: Rx Multicast Cnt Ov */ +#define XMR_BC_OK_OV (1L<<3) /* Bit 3: Rx Broadcast Cnt Ov */ +#define XMR_OK_LO_OV (1L<<2) /* Bit 2: Octets Rx OK Low CntOv*/ +#define XMR_OK_HI_OV (1L<<1) /* Bit 1: Octets Rx OK Hi Cnt Ov*/ +#define XMR_OK_OV (1L<<0) /* Bit 0: Frames Received Ok Ov */ -#define XMR_DEF_MSK 0x00000006L /* all bits excepting 1 and 2 */ +#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) -/* XM_TX_CNT_EV 32 bit ro Tx Counter Event Register */ +/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ /* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ /* Bit 31..26: reserved */ #define XMT_MAX_SZ_OV (1L<<25) /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ @@ -534,18 +605,18 @@ #define XMT_ABO_COL_OV (1L<<12) /* Bit 12: Tx abo dueto Ex Col Ov*/ #define XMT_MUL_COL_OV (1L<<11) /* Bit 11: Tx Mult Col Cnt Ov */ #define XMT_SNG_COL (1L<<10) /* Bit 10: Tx Single Col Cnt Ov */ -#define XMT_MCTRL_OV (1L<<9) /* Bit 9: Tx MAC Ctrl Counter Ov*/ -#define XMT_MPAUSE (1L<<8) /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ -#define XMT_BURST (1L<<7) /* Bit 7: Tx Burst Event Cnt Ov */ -#define XMT_LONG (1L<<6) /* Bit 6: Tx Long Frame Cnt Ov */ -#define XMT_UC_OK_OV (1L<<5) /* Bit 5: Tx Unicast Cnt Ov */ -#define XMT_MC_OK_OV (1L<<4) /* Bit 4: Tx Multicast Cnt Ov */ -#define XMT_BC_OK_OV (1L<<3) /* Bit 3: Tx Broadcast Cnt Ov */ -#define XMT_OK_LO_OV (1L<<2) /* Bit 2: Octets Tx OK Low CntOv*/ -#define XMT_OK_HI_OV (1L<<1) /* Bit 1: Octets Tx OK Hi Cnt Ov*/ -#define XMT_OK_OV (1L<<0) /* Bit 0: Frames Tx Ok Ov */ +#define XMT_MCTRL_OV (1L<<9) /* Bit 9: Tx MAC Ctrl Counter Ov*/ +#define XMT_MPAUSE (1L<<8) /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ +#define XMT_BURST (1L<<7) /* Bit 7: Tx Burst Event Cnt Ov */ +#define XMT_LONG (1L<<6) /* Bit 6: Tx Long Frame Cnt Ov */ +#define XMT_UC_OK_OV (1L<<5) /* Bit 5: Tx Unicast Cnt Ov */ +#define XMT_MC_OK_OV (1L<<4) /* Bit 4: Tx Multicast Cnt Ov */ +#define XMT_BC_OK_OV (1L<<3) /* Bit 3: Tx Broadcast Cnt Ov */ +#define XMT_OK_LO_OV (1L<<2) /* Bit 2: Octets Tx OK Low CntOv*/ +#define XMT_OK_HI_OV (1L<<1) /* Bit 1: Octets Tx OK Hi Cnt Ov*/ +#define XMT_OK_OV (1L<<0) /* Bit 0: Frames Tx Ok Ov */ -#define XMT_DEF_MSK 0x00000006L /* all bits excepting 1 and 2 */ +#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) /* * Receive Frame Status Encoding @@ -559,16 +630,16 @@ /* Bit 12: reserved */ #define XMR_FS_BURST (1L<<11) /* Bit 11: Burst Mode */ #define XMR_FS_CEX_ERR (1L<<10) /* Bit 10: Carrier Ext. 
Error */ -#define XMR_FS_802_3 (1L<<9) /* Bit 9: 802.3 Frame */ -#define XMR_FS_COL_ERR (1L<<8) /* Bit 8: Collision Error */ -#define XMR_FS_CAR_ERR (1L<<7) /* Bit 7: Carrier Event Error */ -#define XMR_FS_LEN_ERR (1L<<6) /* Bit 6: In-Range Length Error */ -#define XMR_FS_FRA_ERR (1L<<5) /* Bit 5: Framing Error */ -#define XMR_FS_RUNT (1L<<4) /* Bit 4: Runt Error */ -#define XMR_FS_LNG_ERR (1L<<3) /* Bit 3: Gaint Error */ -#define XMR_FS_FCS_ERR (1L<<2) /* Bit 2: Frame Check Sequ Err */ -#define XMR_FS_ERR (1L<<1) /* Bit 1: Frame Error */ -#define XMR_FS_MCTRL (1L<<0) /* Bit 0: MAC Control Packet */ +#define XMR_FS_802_3 (1L<<9) /* Bit 9: 802.3 Frame */ +#define XMR_FS_COL_ERR (1L<<8) /* Bit 8: Collision Error */ +#define XMR_FS_CAR_ERR (1L<<7) /* Bit 7: Carrier Event Error */ +#define XMR_FS_LEN_ERR (1L<<6) /* Bit 6: In-Range Length Error */ +#define XMR_FS_FRA_ERR (1L<<5) /* Bit 5: Framing Error */ +#define XMR_FS_RUNT (1L<<4) /* Bit 4: Runt Frame */ +#define XMR_FS_LNG_ERR (1L<<3) /* Bit 3: Giant (Jumbo) Frame */ +#define XMR_FS_FCS_ERR (1L<<2) /* Bit 2: Frame Check Sequ Err */ +#define XMR_FS_ERR (1L<<1) /* Bit 1: Frame Error */ +#define XMR_FS_MCTRL (1L<<0) /* Bit 0: MAC Control Packet */ /* * XMR_FS_ERR will be set if @@ -584,102 +655,135 @@ /* * XMAC-PHY Registers, indirect addressed over the XMAC */ -#define PHY_XMAC_CTRL 0x00 /* 16 bit r/w PHY Control Register */ -#define PHY_XMAC_STAT 0x01 /* 16 bit r/w PHY Status Register */ -#define PHY_XMAC_ID0 0x02 /* 16 bit ro PHY ID0 Register */ -#define PHY_XMAC_ID1 0x03 /* 16 bit ro PHY ID1 Register */ -#define PHY_XMAC_AUNE_ADV 0x04 /* 16 bit r/w Autoneg Advertisement */ -#define PHY_XMAC_AUNE_LP 0x05 /* 16 bit ro Link Partner Abi Reg */ -#define PHY_XMAC_AUNE_EXP 0x06 /* 16 bit ro Autoneg Expansion Reg */ -#define PHY_XMAC_NEPG 0x07 /* 16 bit r/w Next Page Register */ -#define PHY_XMAC_NEPG_LP 0x08 /* 16 bit ro Next Page Link P Reg */ +#define PHY_XMAC_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_XMAC_STAT 0x01 /* 16 bit r/w PHY Status Register */ +#define PHY_XMAC_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_XMAC_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_XMAC_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_XMAC_AUNE_LP 0x05 /* 16 bit r/o Link Partner Abi Reg */ +#define PHY_XMAC_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. 
Expansion Reg */ +#define PHY_XMAC_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_XMAC_NEPG_LP 0x08 /* 16 bit r/o Next Page Link P Reg */ /* 0x09 - 0x0e: reserved */ -#define PHY_XMAC_EXT_STAT 0x0f /* 16 bit ro Ext Status Register */ -#define PHY_XMAC_RES_ABI 0x10 /* 16 bit ro PHY Resolved Ability */ +#define PHY_XMAC_EXT_STAT 0x0f /* 16 bit r/o Ext Status Register */ +#define PHY_XMAC_RES_ABI 0x10 /* 16 bit r/o PHY Resolved Ability */ /*----------------------------------------------------------------------------*/ /* * Broadcom-PHY Registers, indirect addressed over XMAC */ -#define PHY_BCOM_CTRL 0x00 /* 16 bit r/w PHY Control Register */ -#define PHY_BCOM_STAT 0x01 /* 16 bit ro PHY Status Register */ -#define PHY_BCOM_ID0 0x02 /* 16 bit ro PHY ID0 Register */ -#define PHY_BCOM_ID1 0x03 /* 16 bit ro PHY ID1 Register */ -#define PHY_BCOM_AUNE_ADV 0x04 /* 16 bit r/w Autoneg Advertisement */ -#define PHY_BCOM_AUNE_LP 0x05 /* 16 bit ro Link Part Ability Reg */ -#define PHY_BCOM_AUNE_EXP 0x06 /* 16 bit ro Autoneg Expansion Reg */ -#define PHY_BCOM_NEPG 0x07 /* 16 bit r/w Next Page Register */ -#define PHY_BCOM_NEPG_LP 0x08 /* 16 bit ro Next Page Link P Reg */ +#define PHY_BCOM_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_BCOM_STAT 0x01 /* 16 bit r/o PHY Status Register */ +#define PHY_BCOM_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_BCOM_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_BCOM_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_BCOM_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ +#define PHY_BCOM_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ +#define PHY_BCOM_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_BCOM_NEPG_LP 0x08 /* 16 bit r/o Next Page Link P Reg */ /* Broadcom-specific registers */ -#define PHY_BCOM_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */ -#define PHY_BCOM_1000T_STAT 0x0a /* 16 bit ro 1000Base-T Status Reg */ +#define PHY_BCOM_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */ +#define PHY_BCOM_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ /* 0x0b - 0x0e: reserved */ -#define PHY_BCOM_EXT_STAT 0x0f /* 16 bit ro Extended Status Reg */ -#define PHY_BCOM_P_EXT_CTRL 0x10 /* 16 bit r/w PHY Extended Ctrl Reg */ -#define PHY_BCOM_P_EXT_STAT 0x11 /* 16 bit ro PHY Extended Stat Reg */ -#define PHY_BCOM_RE_CTR 0x12 /* 16 bit r/w Receive Error Counter */ -#define PHY_BCOM_FC_CTR 0x13 /* 16 bit r/w False Carr Sense Cnt */ -#define PHY_BCOM_RNO_CTR 0x14 /* 16 bit r/w Receiver NOT_OK Cnt */ +#define PHY_BCOM_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ +#define PHY_BCOM_P_EXT_CTRL 0x10 /* 16 bit r/w PHY Extended Ctrl Reg */ +#define PHY_BCOM_P_EXT_STAT 0x11 /* 16 bit r/o PHY Extended Stat Reg */ +#define PHY_BCOM_RE_CTR 0x12 /* 16 bit r/w Receive Error Counter */ +#define PHY_BCOM_FC_CTR 0x13 /* 16 bit r/w False Carr Sense Cnt */ +#define PHY_BCOM_RNO_CTR 0x14 /* 16 bit r/w Receiver NOT_OK Cnt */ /* 0x15 - 0x17: reserved */ -#define PHY_BCOM_AUX_CTRL 0x18 /* 16 bit r/w Auxiliary Control Reg */ -#define PHY_BCOM_AUX_STAT 0x19 /* 16 bit ro Auxiliary Stat Summary*/ -#define PHY_BCOM_INT_STAT 0x1a /* 16 bit ro Interrupt Status Reg */ -#define PHY_BCOM_INT_MASK 0x1b /* 16 bit r/w Interrupt Mask Reg */ +#define PHY_BCOM_AUX_CTRL 0x18 /* 16 bit r/w Auxiliary Control Reg */ +#define PHY_BCOM_AUX_STAT 0x19 /* 16 bit r/o Auxiliary Stat Summary */ +#define PHY_BCOM_INT_STAT 0x1a /* 16 bit r/o Interrupt Status Reg */ +#define PHY_BCOM_INT_MASK 0x1b /* 16 bit r/w Interrupt Mask 
Reg */ /* 0x1c: reserved */ /* 0x1d - 0x1f: test registers */ /*----------------------------------------------------------------------------*/ /* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +#define PHY_MARV_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_MARV_STAT 0x01 /* 16 bit r/o PHY Status Register */ +#define PHY_MARV_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_MARV_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_MARV_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_MARV_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ +#define PHY_MARV_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */ +#define PHY_MARV_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_MARV_NEPG_LP 0x08 /* 16 bit r/o Next Page Link P Reg */ + /* Marvel-specific registers */ +#define PHY_MARV_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */ +#define PHY_MARV_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ + /* 0x0b - 0x0e: reserved */ +#define PHY_MARV_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ +#define PHY_MARV_PHY_CTRL 0x10 /* 16 bit r/w PHY Specific Ctrl Reg */ +#define PHY_MARV_PHY_STAT 0x11 /* 16 bit r/o PHY Specific Stat Reg */ +#define PHY_MARV_INT_MASK 0x12 /* 16 bit r/w Interrupt Mask Reg */ +#define PHY_MARV_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */ +#define PHY_MARV_EXT_CTRL 0x14 /* 16 bit r/w Ext. PHY Specific Ctrl */ +#define PHY_MARV_RXE_CNT 0x15 /* 16 bit r/w Receive Error Counter */ +#define PHY_MARV_EXT_ADR 0x16 /* 16 bit r/w Ext. Ad. for Cable Diag. */ + /* 0x17: reserved */ +#define PHY_MARV_LED_CTRL 0x18 /* 16 bit r/w LED Control Reg */ +#define PHY_MARV_LED_OVER 0x19 /* 16 bit r/w Manual LED Override Reg */ +#define PHY_MARV_EXT_CTRL_2 0x1a /* 16 bit r/w Ext. PHY Specific Ctrl 2 */ +#define PHY_MARV_EXT_P_STAT 0x1b /* 16 bit r/w Ext. PHY Spec. Stat Reg */ +#define PHY_MARV_CABLE_DIAG 0x1c /* 16 bit r/o Cable Diagnostic Reg */ + /* 0x1d - 0x1f: reserved */ + +/*----------------------------------------------------------------------------*/ +/* * Level One-PHY Registers, indirect addressed over XMAC */ -#define PHY_LONE_CTRL 0x00 /* 16 bit r/w PHY Control Register */ -#define PHY_LONE_STAT 0x01 /* 16 bit ro PHY Status Register */ -#define PHY_LONE_ID0 0x02 /* 16 bit ro PHY ID0 Register */ -#define PHY_LONE_ID1 0x03 /* 16 bit ro PHY ID1 Register */ -#define PHY_LONE_AUNE_ADV 0x04 /* 16 bit r/w Autoneg Advertisement */ -#define PHY_LONE_AUNE_LP 0x05 /* 16 bit ro Link Part Ability Reg */ -#define PHY_LONE_AUNE_EXP 0x06 /* 16 bit ro Autoneg Expansion Reg */ -#define PHY_LONE_NEPG 0x07 /* 16 bit r/w Next Page Register */ -#define PHY_LONE_NEPG_LP 0x08 /* 16 bit ro Next Page Link Partner*/ +#define PHY_LONE_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_LONE_STAT 0x01 /* 16 bit r/o PHY Status Register */ +#define PHY_LONE_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_LONE_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_LONE_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_LONE_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */ +#define PHY_LONE_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. 
Expansion Reg */ +#define PHY_LONE_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_LONE_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner*/ /* Level One-specific registers */ -#define PHY_LONE_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg*/ -#define PHY_LONE_1000T_STAT 0x0a /* 16 bit ro 1000Base-T Status Reg */ +#define PHY_LONE_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg*/ +#define PHY_LONE_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ /* 0x0b -0x0e: reserved */ -#define PHY_LONE_EXT_STAT 0x0f /* 16 bit ro Extended Status Reg */ -#define PHY_LONE_PORT_CFG 0x10 /* 16 bit r/w Port Configuration Reg*/ -#define PHY_LONE_Q_STAT 0x11 /* 16 bit ro Quick Status Reg */ -#define PHY_LONE_INT_ENAB 0x12 /* 16 bit r/w Interrupt Enable Reg */ -#define PHY_LONE_INT_STAT 0x13 /* 16 bit ro Interrupt Status Reg */ -#define PHY_LONE_LED_CFG 0x14 /* 16 bit r/w LED Configuration Reg */ -#define PHY_LONE_PORT_CTRL 0x15 /* 16 bit r/w Port Control Reg */ -#define PHY_LONE_CIM 0x16 /* 16 bit ro CIM Reg */ +#define PHY_LONE_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */ +#define PHY_LONE_PORT_CFG 0x10 /* 16 bit r/w Port Configuration Reg*/ +#define PHY_LONE_Q_STAT 0x11 /* 16 bit r/o Quick Status Reg */ +#define PHY_LONE_INT_ENAB 0x12 /* 16 bit r/w Interrupt Enable Reg */ +#define PHY_LONE_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */ +#define PHY_LONE_LED_CFG 0x14 /* 16 bit r/w LED Configuration Reg */ +#define PHY_LONE_PORT_CTRL 0x15 /* 16 bit r/w Port Control Reg */ +#define PHY_LONE_CIM 0x16 /* 16 bit r/o CIM Reg */ /* 0x17 -0x1c: reserved */ /*----------------------------------------------------------------------------*/ /* * National-PHY Registers, indirect addressed over XMAC */ -#define PHY_NAT_CTRL 0x00 /* 16 bit r/w PHY Control Register */ -#define PHY_NAT_STAT 0x01 /* 16 bit r/w PHY Status Register */ -#define PHY_NAT_ID0 0x02 /* 16 bit ro PHY ID0 Register */ -#define PHY_NAT_ID1 0x03 /* 16 bit ro PHY ID1 Register */ -#define PHY_NAT_AUNE_ADV 0x04 /* 16 bit r/w Autonegotiation Advertisement */ -#define PHY_NAT_AUNE_LP 0x05 /* 16 bit ro Link Partner Ability Reg */ -#define PHY_NAT_AUNE_EXP 0x06 /* 16 bit ro Autonegotiation Expansion Reg */ -#define PHY_NAT_NEPG 0x07 /* 16 bit r/w Next Page Register */ -#define PHY_NAT_NEPG_LP 0x08 /* 16 bit ro Next Page Link Partner Reg */ +#define PHY_NAT_CTRL 0x00 /* 16 bit r/w PHY Control Register */ +#define PHY_NAT_STAT 0x01 /* 16 bit r/w PHY Status Register */ +#define PHY_NAT_ID0 0x02 /* 16 bit r/o PHY ID0 Register */ +#define PHY_NAT_ID1 0x03 /* 16 bit r/o PHY ID1 Register */ +#define PHY_NAT_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */ +#define PHY_NAT_AUNE_LP 0x05 /* 16 bit r/o Link Partner Ability Reg */ +#define PHY_NAT_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. 
Expansion Reg */ +#define PHY_NAT_NEPG 0x07 /* 16 bit r/w Next Page Register */ +#define PHY_NAT_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner Reg */ /* National-specific registers */ -#define PHY_NAT_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg */ -#define PHY_NAT_1000T_STAT 0x0a /* 16 bit ro 1000Base-T Status Reg */ +#define PHY_NAT_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg */ +#define PHY_NAT_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */ /* 0x0b -0x0e: reserved */ -#define PHY_NAT_EXT_STAT 0x0f /* 16 bit ro Extended Status Register */ -#define PHY_NAT_EXT_CTRL1 0x10 /* 16 bit ro Extended Control Reg1 */ -#define PHY_NAT_Q_STAT1 0x11 /* 16 bit ro Quick Status Reg1 */ -#define PHY_NAT_10B_OP 0x12 /* 16 bit ro 10Base-T Operations Reg */ -#define PHY_NAT_EXT_CTRL2 0x13 /* 16 bit ro Extended Control Reg1 */ -#define PHY_NAT_Q_STAT2 0x14 /* 16 bit ro Quick Status Reg2 */ +#define PHY_NAT_EXT_STAT 0x0f /* 16 bit r/o Extended Status Register */ +#define PHY_NAT_EXT_CTRL1 0x10 /* 16 bit r/o Extended Control Reg1 */ +#define PHY_NAT_Q_STAT1 0x11 /* 16 bit r/o Quick Status Reg1 */ +#define PHY_NAT_10B_OP 0x12 /* 16 bit r/o 10Base-T Operations Reg */ +#define PHY_NAT_EXT_CTRL2 0x13 /* 16 bit r/o Extended Control Reg1 */ +#define PHY_NAT_Q_STAT2 0x14 /* 16 bit r/o Quick Status Reg2 */ /* 0x15 -0x18: reserved */ -#define PHY_NAT_PHY_ADDR 0x19 /* 16 bit ro PHY Address Register */ +#define PHY_NAT_PHY_ADDR 0x19 /* 16 bit r/o PHY Address Register */ /*----------------------------------------------------------------------------*/ @@ -691,131 +795,135 @@ * All other are general. */ -/***** PHY_XMAC_CTRL 16 bit r/w PHY Control Register *****/ -/***** PHY_BCOM_CTRL 16 bit r/w PHY Control Register *****/ -/***** PHY_LONE_CTRL 16 bit r/w PHY Control Register *****/ -#define PHY_CT_RESET (1<<15) /* Bit 15: (sc) clear all PHY releated regs */ +/***** PHY_XMAC_CTRL 16 bit r/w PHY Control Register *****/ +/***** PHY_BCOM_CTRL 16 bit r/w PHY Control Register *****/ +/***** PHY_LONE_CTRL 16 bit r/w PHY Control Register *****/ +#define PHY_CT_RESET (1<<15) /* Bit 15: (sc) clear all PHY related regs */ #define PHY_CT_LOOP (1<<14) /* Bit 14: enable Loopback over PHY */ #define PHY_CT_SPS_LSB (1<<13) /* Bit 13: (BC,L1) Speed select, lower bit */ -#define PHY_CT_ANE (1<<12) /* Bit 12: Autonegotiation Enabled */ +#define PHY_CT_ANE (1<<12) /* Bit 12: Auto-Negotiation Enabled */ #define PHY_CT_PDOWN (1<<11) /* Bit 11: (BC,L1) Power Down Mode */ #define PHY_CT_ISOL (1<<10) /* Bit 10: (BC,L1) Isolate Mode */ -#define PHY_CT_RE_CFG (1<<9) /* Bit 9: (sc) Restart Autonegotiation */ -#define PHY_CT_DUP_MD (1<<8) /* Bit 8: Duplex Mode */ -#define PHY_CT_COL_TST (1<<7) /* Bit 7: (BC,L1) Collsion Test enabled */ -#define PHY_CT_SPS_MSB (1<<6) /* Bit 6: (BC,L1) Speed select, upper bit */ - /* Bit 5..0: reserved */ - -#define PHY_B_CT_SP1000 (1<<6) /* Bit 6: enable speed of 1000 MBit/s */ -#define PHY_B_CT_SP100 (1<<13) /* Bit 13: enable speed of 100 MBit/s */ -#define PHY_B_CT_SP10 (0) /* Bit 6/13 not set, speed of 10 MBit/s */ - -#define PHY_L_CT_SP1000 (1<<6) /* Bit 6: enable speed of 1000 MBit/s */ -#define PHY_L_CT_SP100 (1<<13) /* Bit 13: enable speed of 100 MBit/s */ -#define PHY_L_CT_SP10 (0) /* Bit 6/13 not set, speed of 10 MBit/s */ - - -/***** PHY_XMAC_STAT 16 bit r/w PHY Status Register *****/ -/***** PHY_BCOM_STAT 16 bit r/w PHY Status Register *****/ -/***** PHY_LONE_STAT 16 bit r/w PHY Status Register *****/ +#define PHY_CT_RE_CFG (1<<9) /* Bit 9: (sc) Restart Auto-Negotiation */ 
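The control-register bits above follow the standard MII layout: auto-negotiation is enabled with PHY_CT_ANE and restarted by pulsing the self-clearing PHY_CT_RE_CFG bit through the PHY control register at offset 0x00 (PHY_XMAC_CTRL / PHY_BCOM_CTRL). The minimal sketch below shows that combination; phy_write() is only a stand-in for whatever PHY access primitive a driver provides (it is not part of this patch), and the macro values are repeated from the definitions above so the snippet compiles on its own.

#include <stdio.h>

#define PHY_BCOM_CTRL  0x00     /* PHY Control Register offset, as above */
#define PHY_CT_ANE     (1<<12)  /* Auto-Negotiation Enabled, as above */
#define PHY_CT_RE_CFG  (1<<9)   /* (sc) Restart Auto-Negotiation, as above */

/* stand-in for the driver's real PHY write primitive (assumed, not in this patch) */
static void phy_write(unsigned int port, unsigned int reg, unsigned short val)
{
	printf("port %u: PHY reg 0x%02x <- 0x%04x\n", port, reg, (unsigned)val);
}

/* enable auto-negotiation and pulse the self-clearing restart bit */
static void phy_restart_aneg(unsigned int port)
{
	phy_write(port, PHY_BCOM_CTRL, PHY_CT_ANE | PHY_CT_RE_CFG);
}

int main(void)
{
	phy_restart_aneg(0);
	return 0;
}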
+#define PHY_CT_DUP_MD (1<<8) /* Bit 8: Duplex Mode */ +#define PHY_CT_COL_TST (1<<7) /* Bit 7: (BC,L1) Collision Test enabled */ +#define PHY_CT_SPS_MSB (1<<6) /* Bit 6: (BC,L1) Speed select, upper bit */ + /* Bit 5..0: reserved */ + +#define PHY_CT_SP1000 PHY_CT_SPS_MSB /* enable speed of 1000 Mbps */ +#define PHY_CT_SP100 PHY_CT_SPS_LSB /* enable speed of 100 Mbps */ +#define PHY_CT_SP10 (0) /* enable speed of 10 Mbps */ + + +/***** PHY_XMAC_STAT 16 bit r/w PHY Status Register *****/ +/***** PHY_BCOM_STAT 16 bit r/w PHY Status Register *****/ +/***** PHY_MARV_STAT 16 bit r/w PHY Status Register *****/ +/***** PHY_LONE_STAT 16 bit r/w PHY Status Register *****/ /* Bit 15..9: reserved */ - /* (BC/L1) 100/10 MBit/s cap bits ignored*/ -#define PHY_ST_EXT_ST (1<<8) /* Bit 8: Extended Status Present */ - /* Bit 7: reserved */ -#define PHY_ST_PRE_SUB (1<<6) /* Bit 6: (BC/L1) preamble suppression */ -#define PHY_ST_AN_OVER (1<<5) /* Bit 5: Autonegotiation Over */ -#define PHY_ST_REM_FLT (1<<4) /* Bit 4: Remode Fault Condition Occured*/ -#define PHY_ST_AN_CAP (1<<3) /* Bit 3: Autonegotiation Capability */ -#define PHY_ST_LSYNC (1<<2) /* Bit 2: Link Synchronized */ -#define PHY_ST_JAP_DET (1<<1) /* Bit 1: (BC/L1) Japper Detected */ -#define PHY_ST_EXT_REG (1<<0) /* Bit 0: Extended Register available */ - - -/* PHY_XMAC_ID1 16 bit ro PHY ID1 Register */ -/* PHY_BCOM_ID1 16 bit ro PHY ID1 Register */ -/* PHY_LONE_ID1 16 bit ro PHY ID1 Register */ -#define PHY_I1_OUI (0x3f<<10) /* Bit 15..10: Organiz. Unique ID */ -#define PHY_I1_MOD_NUM (0x3f<<4) /* Bit 9.. 4: Model Number */ -#define PHY_I1_REV (0x0f<<0) /* Bit 3.. 0: Revision Number */ + /* (BC/L1) 100/10 Mbps cap bits ignored*/ +#define PHY_ST_EXT_ST (1<<8) /* Bit 8: Extended Status Present */ + /* Bit 7: reserved */ +#define PHY_ST_PRE_SUP (1<<6) /* Bit 6: (BC/L1) preamble suppression */ +#define PHY_ST_AN_OVER (1<<5) /* Bit 5: Auto-Negotiation Over */ +#define PHY_ST_REM_FLT (1<<4) /* Bit 4: Remote Fault Condition Occured */ +#define PHY_ST_AN_CAP (1<<3) /* Bit 3: Auto-Negotiation Capability */ +#define PHY_ST_LSYNC (1<<2) /* Bit 2: Link Synchronized */ +#define PHY_ST_JAB_DET (1<<1) /* Bit 1: (BC/L1) Jabber Detected */ +#define PHY_ST_EXT_REG (1<<0) /* Bit 0: Extended Register available */ + + +/***** PHY_XMAC_ID1 16 bit r/o PHY ID1 Register */ +/***** PHY_BCOM_ID1 16 bit r/o PHY ID1 Register */ +/***** PHY_MARV_ID1 16 bit r/o PHY ID1 Register */ +/***** PHY_LONE_ID1 16 bit r/o PHY ID1 Register */ +#define PHY_I1_OUI_MSK (0x3f<<10) /* Bit 15..10: Organization Unique ID */ +#define PHY_I1_MOD_NUM (0x3f<<4) /* Bit 9.. 4: Model Number */ +#define PHY_I1_REV_MSK 0x0f /* Bit 3.. 0: Revision Number */ + +/* different Broadcom PHY Ids */ +#define PHY_BCOM_ID1_A1 0x6041 +#define PHY_BCOM_ID1_B2 0x6043 +#define PHY_BCOM_ID1_C0 0x6044 +#define PHY_BCOM_ID1_C5 0x6047 -/***** PHY_XMAC_AUNE_ADV 16 bit r/w Autoneg Advertisement *****/ -/***** PHY_XMAC_AUNE_LP 16 bit ro Link Partner Ability Reg *****/ +/***** PHY_XMAC_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_XMAC_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ #define PHY_AN_NXT_PG (1<<15) /* Bit 15: Request Next Page */ #define PHY_X_AN_ACK (1<<14) /* Bit 14: (ro) Acknowledge Received */ -#define PHY_X_AN_RFB (3<<12) /* Bit 13..12: Remode Fault Bits */ +#define PHY_X_AN_RFB (3<<12) /* Bit 13..12: Remote Fault Bits */ /* Bit 11.. 9: reserved */ -#define PHY_X_AN_PAUSE (3<<7) /* Bit 8.. 
7: Pause Bits */ -#define PHY_X_AN_HD (1<<6) /* Bit 6: Half Duplex */ -#define PHY_X_AN_FD (1<<5) /* Bit 5: Full Duplex */ - /* Bit 4.. 0: reserved */ +#define PHY_X_AN_PAUSE (3<<7) /* Bit 8.. 7: Pause Bits */ +#define PHY_X_AN_HD (1<<6) /* Bit 6: Half Duplex */ +#define PHY_X_AN_FD (1<<5) /* Bit 5: Full Duplex */ + /* Bit 4.. 0: reserved */ -/***** PHY_BCOM_AUNE_ADV 16 bit r/w Autoneg Advertisement *****/ -/***** PHY_BCOM_AUNE_LP 16 bit ro Link Partner Ability Reg *****/ +/***** PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ /* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */ /* Bit 14: reserved */ #define PHY_B_AN_RF (1<<13) /* Bit 13: Remote Fault */ /* Bit 12: reserved */ -#define PHY_B_AN_ASP (1<<11) /* Bit 11: Asymetric Pause */ +#define PHY_B_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */ #define PHY_B_AN_PC (1<<10) /* Bit 10: Pause Capable */ - /* Bit 9..5: 100/10 BT cap bits ingnored */ -#define PHY_B_AN_SEL (0x1f<<0)/* Bit 4..0: Selector Field, 00001=Ethernet*/ + /* Bit 9..5: 100/10 BT cap bits ingnored */ +#define PHY_B_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/ -/***** PHY_LONE_AUNE_ADV 16 bit r/w Autoneg Advertisement *****/ -/***** PHY_LONE_AUNE_LP 16 bit ro Link Partner Ability Reg *****/ +/***** PHY_LONE_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_LONE_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ /* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */ /* Bit 14: reserved */ #define PHY_L_AN_RF (1<<13) /* Bit 13: Remote Fault */ /* Bit 12: reserved */ -#define PHY_L_AN_ASP (1<<11) /* Bit 11: Asymetric Pause */ +#define PHY_L_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */ #define PHY_L_AN_PC (1<<10) /* Bit 10: Pause Capable */ - /* Bit 9..5: 100/10 BT cap bits ingnored */ -#define PHY_L_AN_SEL (0x1f<<0)/* Bit 4..0: Selector Field, 00001=Ethernet*/ + /* Bit 9..5: 100/10 BT cap bits ingnored */ +#define PHY_L_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/ -/***** PHY_NAT_AUNE_ADV 16 bit r/w Autoneg Advertisement *****/ -/***** PHY_NAT_AUNE_LP 16 bit ro Link Partner Ability Reg *****/ +/***** PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/***** PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ /* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */ /* Bit 14: reserved */ #define PHY_N_AN_RF (1<<13) /* Bit 13: Remote Fault */ /* Bit 12: reserved */ #define PHY_N_AN_100F (1<<11) /* Bit 11: 100Base-T2 FD Support */ #define PHY_N_AN_100H (1<<10) /* Bit 10: 100Base-T2 HD Support */ - /* Bit 9..5: 100/10 BT cap bits ingnored */ -#define PHY_N_AN_SEL (0x1f<<0)/* Bit 4..0: Selector Field, 00001=Ethernet*/ + /* Bit 9..5: 100/10 BT cap bits ingnored */ +#define PHY_N_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/ /* field type definition for PHY_x_AN_SEL */ #define PHY_SEL_TYPE 0x01 /* 00001 = Ethernet */ -/***** PHY_XMAC_AUNE_EXP 16 bit ro Autoneg Expansion Reg *****/ - /* Bit 15..4: reserved */ -#define PHY_AN_LP_NP (1<<3) /* Bit 3: Link Partner can Next Page */ -#define PHY_AN_LOC_NP (1<<2) /* Bit 2: Local PHY can Next Page */ -#define PHY_AN_RX_PG (1<<1) /* Bit 1: Page Received */ - /* Bit 0: reserved */ +/***** PHY_XMAC_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ + /* Bit 15..4: reserved */ +#define PHY_AN_LP_NP (1<<3) /* Bit 3: Link Partner can Next Page */ +#define PHY_AN_LOC_NP (1<<2) /* Bit 2: Local PHY can Next Page */ +#define PHY_AN_RX_PG (1<<1) /* Bit 1: Page Received */ + 
/* Bit 0: reserved */ -/***** PHY_BCOM_AUNE_EXP 16 bit ro Autoneg Expansion Reg *****/ +/***** PHY_BCOM_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ /* Bit 15..5: reserved */ -#define PHY_B_AN_PDF (1<<4) /* Bit 4: Parallel Detection Fault */ -/* PHY_AN_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */ -/* PHY_AN_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */ -/* PHY_AN_RX_PG (see XMAC) Bit 1: Page Received */ -#define PHY_B_AN_LP_CAP (1<<0) /* Bit 0: Link Partner Autoneg Cap. */ - -/***** PHY_LONE_AUNE_EXP 16 bit ro Autoneg Expansion Reg *****/ -#define PHY_L_AN_BP (1<<5) /* Bit 5: Base Page Indication */ -#define PHY_L_AN_PDF (1<<4) /* Bit 4: Parallel Detection Fault */ -/* PHY_AN_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */ -/* PHY_AN_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */ -/* PHY_AN_RX_PG (see XMAC) Bit 1: Page Received */ -#define PHY_B_AN_LP_CAP (1<<0) /* Bit 0: Link Partner Autoneg Cap. */ - - -/***** PHY_XMAC_NEPG 16 bit r/w Next Page Register *****/ -/***** PHY_BCOM_NEPG 16 bit r/w Next Page Register *****/ -/***** PHY_LONE_NEPG 16 bit r/w Next Page Register *****/ -/***** PHY_XMAC_NEPG_LP 16 bit ro Next Page Link Partner *****/ -/***** PHY_BCOM_NEPG_LP 16 bit ro Next Page Link Partner *****/ -/***** PHY_LONE_NEPG_LP 16 bit ro Next Page Link Partner *****/ +#define PHY_B_AN_PDF (1<<4) /* Bit 4: Parallel Detection Fault */ +/* PHY_AN_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */ +/* PHY_AN_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */ +/* PHY_AN_RX_PG (see XMAC) Bit 1: Page Received */ +#define PHY_B_AN_LP_CAP (1<<0) /* Bit 0: Link Partner Auto-Neg. Cap. */ + +/***** PHY_LONE_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/ +#define PHY_L_AN_BP (1<<5) /* Bit 5: Base Page Indication */ +#define PHY_L_AN_PDF (1<<4) /* Bit 4: Parallel Detection Fault */ +/* PHY_AN_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */ +/* PHY_AN_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */ +/* PHY_AN_RX_PG (see XMAC) Bit 1: Page Received */ +#define PHY_B_AN_LP_CAP (1<<0) /* Bit 0: Link Partner Auto-Neg. Cap. 
*/ + + +/***** PHY_XMAC_NEPG 16 bit r/w Next Page Register *****/ +/***** PHY_BCOM_NEPG 16 bit r/w Next Page Register *****/ +/***** PHY_LONE_NEPG 16 bit r/w Next Page Register *****/ +/***** PHY_XMAC_NEPG_LP 16 bit r/o Next Page Link Partner *****/ +/***** PHY_BCOM_NEPG_LP 16 bit r/o Next Page Link Partner *****/ +/***** PHY_LONE_NEPG_LP 16 bit r/o Next Page Link Partner *****/ #define PHY_NP_MORE (1<<15) /* Bit 15: More, Next Pages to follow */ #define PHY_NP_ACK1 (1<<14) /* Bit 14: (ro) Ack 1, for receiving a message*/ #define PHY_NP_MSG_VAL (1<<13) /* Bit 13: Message Page valid */ @@ -826,66 +934,66 @@ /* * XMAC-Specific */ -/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ #define PHY_X_EX_FD (1<<15) /* Bit 15: Device Supports Full Duplex */ #define PHY_X_EX_HD (1<<14) /* Bit 14: Device Supports Half Duplex */ /* Bit 13..0: reserved */ -/***** PHY_XMAC_RES_ABI 16 bit ro PHY Resolved Ability *****/ +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ /* Bit 15..9: reserved */ -#define PHY_X_RS_PAUSE (3<<7) /* Bit 8..7: selected Pause Mode */ -#define PHY_X_RS_HD (1<<6) /* Bit 6: Half Duplex Mode selected */ -#define PHY_X_RS_FD (1<<5) /* Bit 5: Full Duplex Mode selected */ -#define PHY_X_RS_ABLMIS (1<<4) /* Bit 4: duplex or pause cap mismatch */ -#define PHY_X_RS_PAUMIS (1<<3) /* Bit 3: pause capability missmatch */ - /* Bit 2..0: reserved */ +#define PHY_X_RS_PAUSE (3<<7) /* Bit 8..7: selected Pause Mode */ +#define PHY_X_RS_HD (1<<6) /* Bit 6: Half Duplex Mode selected */ +#define PHY_X_RS_FD (1<<5) /* Bit 5: Full Duplex Mode selected */ +#define PHY_X_RS_ABLMIS (1<<4) /* Bit 4: duplex or pause cap mismatch */ +#define PHY_X_RS_PAUMIS (1<<3) /* Bit 3: pause capability missmatch */ + /* Bit 2..0: reserved */ /* * Remote Fault Bits (PHY_X_AN_RFB) encoding */ -#define X_RFB_OK (0<<12) /* Bit 12..13 No errors, Link OK */ -#define X_RFB_LF (1<<12) /* Bit 12..13 Link Failure */ -#define X_RFB_OFF (2<<12) /* Bit 12..13 Offline */ -#define X_RFB_AN_ERR (3<<12) /* Bit 12..13 Autonegotiation Error */ +#define X_RFB_OK (0<<12) /* Bit 13..12 No errors, Link OK */ +#define X_RFB_LF (1<<12) /* Bit 13..12 Link Failure */ +#define X_RFB_OFF (2<<12) /* Bit 13..12 Offline */ +#define X_RFB_AN_ERR (3<<12) /* Bit 13..12 Auto-Negotiation Error */ /* * Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ -#define PHY_X_P_NO_PAUSE (0<<7) /* Bit 8..7: no Pause Mode */ -#define PHY_X_P_SYM_MD (1<<7) /* Bit 8..7: symmetric Pause Mode */ -#define PHY_X_P_ASYM_MD (2<<7) /* Bit 8..7: asymmetric Pause Mode */ -#define PHY_X_P_BOTH_MD (3<<7) /* Bit 8..7: both Pause Mode */ +#define PHY_X_P_NO_PAUSE (0<<7) /* Bit 8..7: no Pause Mode */ +#define PHY_X_P_SYM_MD (1<<7) /* Bit 8..7: symmetric Pause Mode */ +#define PHY_X_P_ASYM_MD (2<<7) /* Bit 8..7: asymmetric Pause Mode */ +#define PHY_X_P_BOTH_MD (3<<7) /* Bit 8..7: both Pause Mode */ /* * Broadcom-Specific */ -/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ -#define PHY_B_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ -#define PHY_B_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ -#define PHY_B_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ -#define PHY_B_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ -#define PHY_B_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ -#define PHY_B_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ - /* Bit 7..0: reserved */ - -/***** PHY_BCOM_1000T_STAT 16 bit ro 1000Base-T Status Reg *****/ -#define 
PHY_B_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ -#define PHY_B_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ -#define PHY_B_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ -#define PHY_B_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */ -#define PHY_B_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ -#define PHY_B_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ - /* Bit 9..8: reserved */ -#define PHY_B_1000S_IEC (255<<0)/* Bit 7..0: Idle Error Count */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_B_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_B_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ +#define PHY_B_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ +#define PHY_B_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ +#define PHY_B_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_B_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ + /* Bit 7..0: reserved */ + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +#define PHY_B_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ +#define PHY_B_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ +#define PHY_B_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ +#define PHY_B_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */ +#define PHY_B_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ +#define PHY_B_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ +#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ -/***** PHY_BCOM_EXT_STAT 16 bit ro Extended Status Register *****/ +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ #define PHY_B_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */ #define PHY_B_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */ #define PHY_B_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */ #define PHY_B_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */ /* Bit 11..0: reserved */ -/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ #define PHY_B_PEC_MAC_PHY (1<<15) /* Bit 15: 10BIT/GMI-Interface */ #define PHY_B_PEC_DIS_CROSS (1<<14) /* Bit 14: Disable MDI Crossover */ #define PHY_B_PEC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */ @@ -901,9 +1009,9 @@ #define PHY_B_PEC_LED_OFF (1<<3) /* Bit 3: Force LED's off */ #define PHY_B_PEC_EX_IPG (1<<2) /* Bit 2: Extend Tx IPG Mode */ #define PHY_B_PEC_3_LED (1<<1) /* Bit 1: Three Link LED mode */ -#define PHY_B_PEC_HIGH_LA (1<<0) /* Bit 0: GMII Fifo Elasticy */ +#define PHY_B_PEC_HIGH_LA (1<<0) /* Bit 0: GMII FIFO Elasticy */ -/***** PHY_BCOM_P_EXT_STAT 16 bit ro PHY Extended Status Reg *****/ +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ /* Bit 15..14: reserved */ #define PHY_B_PES_CROSS_STAT (1<<13) /* Bit 13: MDI Crossover Status */ #define PHY_B_PES_INT_STAT (1<<12) /* Bit 12: Interrupt Status */ @@ -920,20 +1028,20 @@ #define PHY_B_PES_LOCK_ER (1<<1) /* Bit 1: Lock Error */ #define PHY_B_PES_MLT3_ER (1<<0) /* Bit 0: MLT3 code Error */ -/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ /* Bit 15..8: reserved */ -#define PHY_B_FC_CTR (255<<0)/* Bit 7..0: False Carrier Counter */ +#define PHY_B_FC_CTR 0xff /* Bit 7..0: False Carrier Counter */ -/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ -#define PHY_B_RC_LOC (255<<8)/* Bit 15..8: Local Rx NOT_OK cnt */ 
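As a quick illustration of how the 1000Base-T status definitions above are meant to be used, the small self-contained sketch below decodes a raw PHY_BCOM_1000T_STAT word: the link-partner capability bits are tested directly and the idle error count is extracted with the PHY_B_1000S_IEC mask. The decode function, the sample value and the printout are illustrative only and are not driver code; the masks are repeated from the definitions above so the snippet stands alone.

#include <stdio.h>

#define PHY_B_1000S_LP_FD  (1<<11)  /* Link Partner can FD, as above */
#define PHY_B_1000S_LP_HD  (1<<10)  /* Link Partner can HD, as above */
#define PHY_B_1000S_IEC    0xff     /* Idle Error Count mask, as above */

/* illustrative decode of a raw 1000Base-T status word (not driver code) */
static void decode_1000t_stat(unsigned short stat)
{
	printf("link partner 1000FD: %s, 1000HD: %s, idle errors: %u\n",
	       (stat & PHY_B_1000S_LP_FD) ? "yes" : "no",
	       (stat & PHY_B_1000S_LP_HD) ? "yes" : "no",
	       (unsigned)(stat & PHY_B_1000S_IEC));
}

int main(void)
{
	decode_1000t_stat(0x0c07);	/* both capabilities set, 7 idle errors */
	return 0;
}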
-#define PHY_B_RC_REM (255<<0)/* Bit 7..0: Remote Rx NOT_OK cnt */ +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ +#define PHY_B_RC_LOC_MSK 0xff00 /* Bit 15..8: Local Rx NOT_OK cnt */ +#define PHY_B_RC_REM_MSK 0x00ff /* Bit 7..0: Remote Rx NOT_OK cnt */ -/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ #define PHY_B_AC_L_SQE (1<<15) /* Bit 15: Low Squelch */ #define PHY_B_AC_LONG_PACK (1<<14) /* Bit 14: Rx Long Packets */ #define PHY_B_AC_ER_CTRL (3<<12) /* Bit 13..12: Edgerate Control */ /* Bit 11: reserved */ -#define PHY_B_AC_TX_TST (1<<10) /* Bit 10: tx test bit, always 1 */ +#define PHY_B_AC_TX_TST (1<<10) /* Bit 10: Tx test bit, always 1 */ /* Bit 9.. 8: reserved */ #define PHY_B_AC_DIS_PRF (1<<7) /* Bit 7: dis part resp filter */ /* Bit 6: reserved */ @@ -942,14 +1050,14 @@ #define PHY_B_AC_DIAG (1<<3) /* Bit 3: Diagnostic Mode */ /* Bit 2.. 0: reserved */ -/***** PHY_BCOM_AUX_STAT 16 bit ro Auxiliary Status Reg *****/ +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ #define PHY_B_AS_AN_C (1<<15) /* Bit 15: AutoNeg complete */ #define PHY_B_AS_AN_CA (1<<14) /* Bit 14: AN Complete Ack */ #define PHY_B_AS_ANACK_D (1<<13) /* Bit 13: AN Ack Detect */ #define PHY_B_AS_ANAB_D (1<<12) /* Bit 12: AN Ability Detect */ #define PHY_B_AS_NPW (1<<11) /* Bit 11: AN Next Page Wait */ -#define PHY_B_AS_AN_RES (7<<8) /* Bit 10..8: AN HDC */ -#define PHY_B_AS_PDF (1<<7) /* Bit 7: Parallel Detect. Fault*/ +#define PHY_B_AS_AN_RES_MSK (7<<8) /* Bit 10..8: AN HDC */ +#define PHY_B_AS_PDF (1<<7) /* Bit 7: Parallel Detect. Fault */ #define PHY_B_AS_RF (1<<6) /* Bit 6: Remote Fault */ #define PHY_B_AS_ANP_R (1<<5) /* Bit 5: AN Page Received */ #define PHY_B_AS_LP_ANAB (1<<4) /* Bit 4: LP AN Ability */ @@ -958,8 +1066,10 @@ #define PHY_B_AS_PRR (1<<1) /* Bit 1: Pause Resolution-Rx */ #define PHY_B_AS_PRT (1<<0) /* Bit 0: Pause Resolution-Tx */ -/***** PHY_BCOM_INT_STAT 16 bit ro Interrupt Status Reg *****/ -/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ /* Bit 15: reserved */ #define PHY_B_IS_PSE (1<<14) /* Bit 14: Pair Swap Error */ #define PHY_B_IS_MDXI_SC (1<<13) /* Bit 13: MDIX Status Change */ @@ -979,125 +1089,123 @@ #define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) -/* - * Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding - */ -#define PHY_B_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */ -#define PHY_B_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */ -#define PHY_B_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */ -#define PHY_B_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */ +/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ +#define PHY_B_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */ +#define PHY_B_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */ +#define PHY_B_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */ +#define PHY_B_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */ /* * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) */ -#define PHY_B_RES_1000FD (7<<8) /* Bit 10..8: 1000Base-T Full Dup. */ -#define PHY_B_RES_1000HD (6<<8) /* Bit 10..8: 1000Base-T Half Dup. */ +#define PHY_B_RES_1000FD (7<<8) /* Bit 10..8: 1000Base-T Full Dup. */ +#define PHY_B_RES_1000HD (6<<8) /* Bit 10..8: 1000Base-T Half Dup. 
*/ /* others: 100/10: invalid for us */ /* * Level One-Specific */ -/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ -#define PHY_L_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ -#define PHY_L_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ -#define PHY_L_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ -#define PHY_L_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ -#define PHY_L_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ -#define PHY_L_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ - /* Bit 7..0: reserved */ - -/***** PHY_LONE_1000T_STAT 16 bit ro 1000Base-T Status Reg *****/ -#define PHY_L_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ -#define PHY_L_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ -#define PHY_L_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ -#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/ -#define PHY_L_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ -#define PHY_L_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ - /* Bit 9..8: reserved */ -#define PHY_B_1000S_IEC (255<<0)/* Bit 7..0: Idle Error Count */ +/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_L_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_L_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ +#define PHY_L_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ +#define PHY_L_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ +#define PHY_L_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_L_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ + /* Bit 7..0: reserved */ + +/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +#define PHY_L_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ +#define PHY_L_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ +#define PHY_L_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ +#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/ +#define PHY_L_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ +#define PHY_L_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ +#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ -/***** PHY_LONE_EXT_STAT 16 bit ro Extended Status Register *****/ +/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/ #define PHY_L_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */ #define PHY_L_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */ #define PHY_L_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */ #define PHY_L_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */ /* Bit 11..0: reserved */ -/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/ +/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/ #define PHY_L_PC_REP_MODE (1<<15) /* Bit 15: Repeater Mode */ /* Bit 14: reserved */ #define PHY_L_PC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */ #define PHY_L_PC_BY_SCR (1<<12) /* Bit 12: Bypass Scrambler */ #define PHY_L_PC_BY_45 (1<<11) /* Bit 11: Bypass 4B5B-Decoder */ #define PHY_L_PC_JAB_DIS (1<<10) /* Bit 10: Jabber Disabled */ -#define PHY_L_PC_SQE (1<<9) /* Bit 9: Enable Heartbeat */ -#define PHY_L_PC_TP_LOOP (1<<8) /* Bit 8: TP Loopback */ -#define PHY_L_PC_SSS (1<<7) /* Bit 7: Smart Speed Selection */ -#define PHY_L_PC_FIFO_SIZE (1<<6) /* Bit 6: FIFO Size */ -#define PHY_L_PC_PRE_EN (1<<5) /* Bit 5: Preamble Enable */ -#define PHY_L_PC_CIM (1<<4) /* Bit 4: Carrier Integrity Mon */ -#define PHY_L_PC_10_SER (1<<3) /* Bit 3: Use Serial Output */ -#define PHY_L_PC_ANISOL (1<<2) /* 
Bit 2: Unisolate Port */ -#define PHY_L_PC_TEN_BIT (1<<1) /* Bit 1: 10bit iface mode on */ -#define PHY_L_PC_ALTCLOCK (1<<0) /* Bit 0: (ro) ALTCLOCK Mode on */ +#define PHY_L_PC_SQE (1<<9) /* Bit 9: Enable Heartbeat */ +#define PHY_L_PC_TP_LOOP (1<<8) /* Bit 8: TP Loopback */ +#define PHY_L_PC_SSS (1<<7) /* Bit 7: Smart Speed Selection */ +#define PHY_L_PC_FIFO_SIZE (1<<6) /* Bit 6: FIFO Size */ +#define PHY_L_PC_PRE_EN (1<<5) /* Bit 5: Preamble Enable */ +#define PHY_L_PC_CIM (1<<4) /* Bit 4: Carrier Integrity Mon */ +#define PHY_L_PC_10_SER (1<<3) /* Bit 3: Use Serial Output */ +#define PHY_L_PC_ANISOL (1<<2) /* Bit 2: Unisolate Port */ +#define PHY_L_PC_TEN_BIT (1<<1) /* Bit 1: 10bit iface mode on */ +#define PHY_L_PC_ALTCLOCK (1<<0) /* Bit 0: (ro) ALTCLOCK Mode on */ -/***** PHY_LONE_Q_STAT 16 bit ro Quick Status Reg *****/ +/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/ #define PHY_L_QS_D_RATE (3<<14) /* Bit 15..14: Data Rate */ #define PHY_L_QS_TX_STAT (1<<13) /* Bit 13: Transmitting */ #define PHY_L_QS_RX_STAT (1<<12) /* Bit 12: Receiving */ #define PHY_L_QS_COL_STAT (1<<11) /* Bit 11: Collision */ #define PHY_L_QS_L_STAT (1<<10) /* Bit 10: Link is up */ -#define PHY_L_QS_DUP_MOD (1<<9) /* Bit 9: Full/Half Duplex */ -#define PHY_L_QS_AN (1<<8) /* Bit 8: AutoNeg is On */ -#define PHY_L_QS_AN_C (1<<7) /* Bit 7: AN is Complete */ -#define PHY_L_QS_LLE (7<<4) /* Bit 6: Line Length Estim. */ -#define PHY_L_QS_PAUSE (1<<3) /* Bit 3: LP advertised Pause */ -#define PHY_L_QS_AS_PAUSE (1<<2) /* Bit 2: LP adv. asym. Pause */ -#define PHY_L_QS_ISOLATE (1<<1) /* Bit 1: CIM Isolated */ -#define PHY_L_QS_EVENT (1<<0) /* Bit 0: Event has occurred */ +#define PHY_L_QS_DUP_MOD (1<<9) /* Bit 9: Full/Half Duplex */ +#define PHY_L_QS_AN (1<<8) /* Bit 8: AutoNeg is On */ +#define PHY_L_QS_AN_C (1<<7) /* Bit 7: AN is Complete */ +#define PHY_L_QS_LLE (7<<4) /* Bit 6: Line Length Estim. */ +#define PHY_L_QS_PAUSE (1<<3) /* Bit 3: LP advertised Pause */ +#define PHY_L_QS_AS_PAUSE (1<<2) /* Bit 2: LP adv. asym. Pause */ +#define PHY_L_QS_ISOLATE (1<<1) /* Bit 1: CIM Isolated */ +#define PHY_L_QS_EVENT (1<<0) /* Bit 0: Event has occurred */ -/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/ -/***** PHY_LONE_INT_STAT 16 bit ro Interrupt Status Reg *****/ +/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/ +/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/ /* Bit 15..14: reserved */ -#define PHY_L_IS_AN_F (1<<13) /* Bit 13: Autoneg fault */ +#define PHY_L_IS_AN_F (1<<13) /* Bit 13: Auto-Negotiation fault */ /* Bit 12: not described */ #define PHY_L_IS_CROSS (1<<11) /* Bit 11: Crossover used */ #define PHY_L_IS_POL (1<<10) /* Bit 10: Polarity correct. 
used*/ -#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade*/ -#define PHY_L_IS_CFULL (1<<8) /* Bit 8: Counter Full */ -#define PHY_L_IS_AN_C (1<<7) /* Bit 7: AutoNeg Complete */ -#define PHY_L_IS_SPEED (1<<6) /* Bit 6: Speed Changed */ -#define PHY_L_IS_DUP (1<<5) /* Bit 5: Duplex Changed */ -#define PHY_L_IS_LS (1<<4) /* Bit 4: Link Status Changed */ -#define PHY_L_IS_ISOL (1<<3) /* Bit 3: Isolate Occured */ -#define PHY_L_IS_MDINT (1<<2) /* Bit 2: (ro) STAT: MII Int Pending */ -#define PHY_L_IS_INTEN (1<<1) /* Bit 1: ENAB: Enable IRQs */ -#define PHY_L_IS_FORCE (1<<0) /* Bit 0: ENAB: Force Interrupt */ +#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade*/ +#define PHY_L_IS_CFULL (1<<8) /* Bit 8: Counter Full */ +#define PHY_L_IS_AN_C (1<<7) /* Bit 7: AutoNeg Complete */ +#define PHY_L_IS_SPEED (1<<6) /* Bit 6: Speed Changed */ +#define PHY_L_IS_DUP (1<<5) /* Bit 5: Duplex Changed */ +#define PHY_L_IS_LS (1<<4) /* Bit 4: Link Status Changed */ +#define PHY_L_IS_ISOL (1<<3) /* Bit 3: Isolate Occured */ +#define PHY_L_IS_MDINT (1<<2) /* Bit 2: (ro) STAT: MII Int Pending */ +#define PHY_L_IS_INTEN (1<<1) /* Bit 1: ENAB: Enable IRQs */ +#define PHY_L_IS_FORCE (1<<0) /* Bit 0: ENAB: Force Interrupt */ /* int. mask */ #define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN) -/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/ +/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/ #define PHY_L_LC_LEDC (3<<14) /* Bit 15..14: Col/Blink/On/Off */ #define PHY_L_LC_LEDR (3<<12) /* Bit 13..12: Rx/Blink/On/Off */ #define PHY_L_LC_LEDT (3<<10) /* Bit 11..10: Tx/Blink/On/Off */ -#define PHY_L_LC_LEDG (3<<8) /* Bit 9..8: Giga/Blink/On/Off */ -#define PHY_L_LC_LEDS (3<<6) /* Bit 7..6: 10-100/Blink/On/Off */ -#define PHY_L_LC_LEDL (3<<4) /* Bit 5..4: Link/Blink/On/Off */ -#define PHY_L_LC_LEDF (3<<2) /* Bit 3..2: Duplex/Blink/On/Off */ -#define PHY_L_LC_PSTRECH (1<<1) /* Bit 1: Strech LED Pulses */ -#define PHY_L_LC_FREQ (1<<0) /* Bit 0: 30/100 ms */ +#define PHY_L_LC_LEDG (3<<8) /* Bit 9..8: Giga/Blink/On/Off */ +#define PHY_L_LC_LEDS (3<<6) /* Bit 7..6: 10-100/Blink/On/Off */ +#define PHY_L_LC_LEDL (3<<4) /* Bit 5..4: Link/Blink/On/Off */ +#define PHY_L_LC_LEDF (3<<2) /* Bit 3..2: Duplex/Blink/On/Off */ +#define PHY_L_LC_PSTRECH (1<<1) /* Bit 1: Strech LED Pulses */ +#define PHY_L_LC_FREQ (1<<0) /* Bit 0: 30/100 ms */ -/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/ +/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/ #define PHY_L_PC_TX_TCLK (1<<15) /* Bit 15: Enable TX_TCLK */ /* Bit 14: reserved */ #define PHY_L_PC_ALT_NP (1<<13) /* Bit 14: Alternate Next Page */ #define PHY_L_PC_GMII_ALT (1<<12) /* Bit 13: Alternate GMII driver */ /* Bit 11: reserved */ #define PHY_L_PC_TEN_CRS (1<<10) /* Bit 10: Extend CRS*/ - /* Bit 9..0: not described */ + /* Bit 9..0: not described */ -/***** PHY_LONE_CIM 16 bit ro CIM Reg *****/ +/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/ #define PHY_L_CIM_ISOL (255<<8)/* Bit 15..8: Isolate Count */ #define PHY_L_CIM_FALSE_CAR (255<<0)/* Bit 7..0: False Carrier Count */ @@ -1105,37 +1213,37 @@ /* * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding */ -#define PHY_L_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */ -#define PHY_L_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */ -#define PHY_L_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */ -#define PHY_L_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */ +#define PHY_L_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */ +#define 
PHY_L_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */ +#define PHY_L_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */ +#define PHY_L_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */ /* * National-Specific */ -/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ -#define PHY_N_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ -#define PHY_N_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ -#define PHY_N_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ -#define PHY_N_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ -#define PHY_N_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ -#define PHY_N_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ -#define PHY_N_1000C_APC (1<<7) /* Bit 7: Asymetric Pause Cap. */ - /* Bit 6..0: reserved */ - -/***** PHY_NAT_1000T_STAT 16 bit ro 1000Base-T Status Reg *****/ -#define PHY_N_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ -#define PHY_N_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ -#define PHY_N_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ -#define PHY_N_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/ -#define PHY_N_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ -#define PHY_N_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ -#define PHY_N_1000C_LP_APC (1<<9) /* Bit 9: LP Asym. Pause Cap. */ - /* Bit 8: reserved */ -#define PHY_N_1000S_IEC (255<<0)/* Bit 7..0: Idle Error Count */ +/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_N_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_N_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */ +#define PHY_N_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */ +#define PHY_N_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */ +#define PHY_N_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_N_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ +#define PHY_N_1000C_APC (1<<7) /* Bit 7: Asymmetric Pause Cap. */ + /* Bit 6..0: reserved */ + +/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +#define PHY_N_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */ +#define PHY_N_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */ +#define PHY_N_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */ +#define PHY_N_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/ +#define PHY_N_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */ +#define PHY_N_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */ +#define PHY_N_1000C_LP_APC (1<<9) /* Bit 9: LP Asym. Pause Cap. 
*/ + /* Bit 8: reserved */ +#define PHY_N_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */ -/***** PHY_NAT_EXT_STAT 16 bit ro Extended Status Register *****/ +/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/ #define PHY_N_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */ #define PHY_N_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */ #define PHY_N_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */ @@ -1143,12 +1251,461 @@ /* Bit 11..0: reserved */ /* todo: those are still missing */ -/***** PHY_NAT_EXT_CTRL1 16 bit ro Extended Control Reg1 *****/ -/***** PHY_NAT_Q_STAT1 16 bit ro Quick Status Reg1 *****/ -/***** PHY_NAT_10B_OP 16 bit ro 10Base-T Operations Reg *****/ -/***** PHY_NAT_EXT_CTRL2 16 bit ro Extended Control Reg1 *****/ -/***** PHY_NAT_Q_STAT2 16 bit ro Quick Status Reg2 *****/ -/***** PHY_NAT_PHY_ADDR 16 bit ro PHY Address Register *****/ +/***** PHY_NAT_EXT_CTRL1 16 bit r/o Extended Control Reg1 *****/ +/***** PHY_NAT_Q_STAT1 16 bit r/o Quick Status Reg1 *****/ +/***** PHY_NAT_10B_OP 16 bit r/o 10Base-T Operations Reg *****/ +/***** PHY_NAT_EXT_CTRL2 16 bit r/o Extended Control Reg1 *****/ +/***** PHY_NAT_Q_STAT2 16 bit r/o Quick Status Reg2 *****/ +/***** PHY_NAT_PHY_ADDR 16 bit r/o PHY Address Register *****/ + +/* + * Marvell-Specific + */ +/***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +#define PHY_M_AN_NXT_PG BIT_15 /* Request Next Page */ + /* Bit 14: reserved */ +#define PHY_M_AN_RF BIT_13 /* Remote Fault */ + /* Bit 12: reserved */ +#define PHY_M_AN_ASP BIT_11 /* Asymmetric Pause */ +#define PHY_M_AN_PC BIT_10 /* MAC Pause implemented */ +#define PHY_M_AN_100_FD BIT_8 /* Advertise 100Base-TX Full Duplex */ +#define PHY_M_AN_100_HD BIT_7 /* Advertise 100Base-TX Half Duplex */ +#define PHY_M_AN_10_FD BIT_6 /* Advertise 10Base-TX Full Duplex */ +#define PHY_M_AN_10_HD BIT_5 /* Advertise 10Base-TX Half Duplex */ + +/* special defines for FIBER (88E1011S only) */ +#define PHY_M_AN_ASP_X BIT_8 /* Asymmetric Pause */ +#define PHY_M_AN_PC_X BIT_7 /* MAC Pause implemented */ +#define PHY_M_AN_1000X_AHD BIT_6 /* Advertise 10000Base-X Half Duplex */ +#define PHY_M_AN_1000X_AFD BIT_5 /* Advertise 10000Base-X Full Duplex */ + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +#define PHY_M_P_NO_PAUSE_X (0<<7) /* Bit 8.. 7: no Pause Mode */ +#define PHY_M_P_SYM_MD_X (1<<7) /* Bit 8.. 7: symmetric Pause Mode */ +#define PHY_M_P_ASYM_MD_X (2<<7) /* Bit 8.. 7: asymmetric Pause Mode */ +#define PHY_M_P_BOTH_MD_X (3<<7) /* Bit 8.. 7: both Pause Mode */ + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +#define PHY_M_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */ +#define PHY_M_1000C_MSE (1<<12) /* Bit 12: Manual Master/Slave Enable */ +#define PHY_M_1000C_MSC (1<<11) /* Bit 11: M/S Configuration (1=Master) */ +#define PHY_M_1000C_MPD (1<<10) /* Bit 10: Multi-Port Device */ +#define PHY_M_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */ +#define PHY_M_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */ + /* Bit 7..0: reserved */ + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ + +#define PHY_M_PC_TX_FFD_MSK (3<<14) /* Bit 15..14: Tx FIFO Depth Mask */ +#define PHY_M_PC_RX_FFD_MSK (3<<12) /* Bit 13..12: Rx FIFO Depth Mask */ +#define PHY_M_PC_ASS_CRS_TX (1<<11) /* Bit 11: Assert CRS on Transmit */ +#define PHY_M_PC_FL_GOOD (1<<10) /* Bit 10: Force Link Good */ +#define PHY_M_PC_EN_DET_MSK (3<<8) /* Bit 9.. 
8: Energy Detect Mask */ +#define PHY_M_PC_ENA_EXT_D (1<<7) /* Bit 7: Enable Ext. Distance (10BT) */ +#define PHY_M_PC_MDIX_MSK (3<<5) /* Bit 6.. 5: MDI/MDIX Config. Mask */ +#define PHY_M_PC_DIS_125CLK (1<<4) /* Bit 4: Disable 125 CLK */ +#define PHY_M_PC_MAC_POW_UP (1<<3) /* Bit 3: MAC Power up */ +#define PHY_M_PC_SQE_T_ENA (1<<2) /* Bit 2: SQE Test Enabled */ +#define PHY_M_PC_POL_R_DIS (1<<1) /* Bit 1: Polarity Reversal Disabled */ +#define PHY_M_PC_DIS_JABBER (1<<0) /* Bit 0: Disable Jabber */ + + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +#define PHY_M_PS_SPEED_MSK (3<<14) /* Bit 15..14: Speed Mask */ +#define PHY_M_PS_SPEED_1000 (1<<15) /* 10 = 1000 Mbps */ +#define PHY_M_PS_SPEED_100 (1<<14) /* 01 = 100 Mbps */ +#define PHY_M_PS_SPEED_10 0 /* 00 = 10 Mbps */ +#define PHY_M_PS_FULL_DUP (1<<13) /* Bit 13: Full Duplex */ +#define PHY_M_PS_PAGE_REC (1<<12) /* Bit 12: Page Received */ +#define PHY_M_PS_SPDUP_RES (1<<11) /* Bit 11: Speed & Duplex Resolved */ +#define PHY_M_PS_LINK_UP (1<<10) /* Bit 10: Link Up */ +#define PHY_M_PS_CABLE_MSK (3<<7) /* Bit 9.. 7: Cable Length Mask */ +#define PHY_M_PS_MDI_X_STAT (1<<6) /* Bit 6: MDI Crossover Stat (1=MDIX) */ +#define PHY_M_PS_DOWNS_STAT (1<<5) /* Bit 5: Downshift Status (1=downsh.) */ +#define PHY_M_PS_ENDET_STAT (1<<4) /* Bit 4: Energy Detect Status (1=act) */ +#define PHY_M_PS_TX_P_EN (1<<3) /* Bit 3: Tx Pause Enabled */ +#define PHY_M_PS_RX_P_EN (1<<2) /* Bit 2: Rx Pause Enabled */ +#define PHY_M_PS_POL_REV (1<<1) /* Bit 1: Polarity Reversed */ +#define PHY_M_PC_JABBER (1<<0) /* Bit 0: Jabber */ + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/***** PHY_MARV_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +/***** PHY_MARV_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +#define PHY_M_IS_AN_ERROR (1<<15) /* Bit 15: Auto-Negotiation Error */ +#define PHY_M_IS_LSP_CHANGE (1<<14) /* Bit 14: Link Speed Changed */ +#define PHY_M_IS_DUP_CHANGE (1<<13) /* Bit 13: Duplex Mode Changed */ +#define PHY_M_IS_AN_PR (1<<12) /* Bit 12: Page Received */ +#define PHY_M_IS_AN_COMPL (1<<11) /* Bit 11: Auto-Negotiation Completed */ +#define PHY_M_IS_LST_CHANGE (1<<10) /* Bit 10: Link Status Changed */ +#define PHY_M_IS_SYMB_ERROR (1<<9) /* Bit 9: Symbol Error */ +#define PHY_M_IS_FALSE_CARR (1<<8) /* Bit 8: False Carrier */ +#define PHY_M_IS_FIFO_ERROR (1<<7) /* Bit 7: FIFO Overflow/Underrun Error */ +#define PHY_M_IS_MDI_CHANGE (1<<6) /* Bit 6: MDI Crossover Changed */ +#define PHY_M_IS_DOWNSH_DET (1<<5) /* Bit 5: Downshift Detected */ +#define PHY_M_IS_END_CHANGE (1<<4) /* Bit 4: Energy Detect Changed */ + /* Bit 3..2: reserved */ +#define PHY_M_IS_POL_CHANGE (1<<1) /* Bit 1: Polarity Changed */ +#define PHY_M_IS_JABBER (1<<0) /* Bit 0: Jabber */ + +#define PHY_M_DEF_MSK (PHY_M_IS_AN_ERROR | PHY_M_IS_AN_PR | \ + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR) + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +#define PHY_M_EC_M_DSC_MSK (3<<10) /* Bit 11..10: Master downshift counter */ +#define PHY_M_EC_S_DSC_MSK (3<<8) /* Bit 9.. 8: Slave downshift counter */ +#define PHY_M_EC_MAC_S_MSK (7<<4) /* Bit 6.. 4: Def. 
MAC interface speed */ + +#define PHY_M_EC_M_DSC(x) SHIFT10(x) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) SHIFT8(x) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) SHIFT4(x) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define MAC_TX_CLK_0_MHZ 2 +#define MAC_TX_CLK_2_5_MHZ 6 +#define MAC_TX_CLK_25_MHZ 7 + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +#define PHY_M_LEDC_DIS_LED (1<<15) /* Bit 15: Disable LED */ + +#define PHY_M_LED_BL_RATE(x) SHIFT12(x) /* Bit 12..14: Blink Rate */ + +/* values for PHY_M_LED_BL_RATE() */ +#define BL_DEFAULT 0 /* no pulse stretching */ +#define BL_21MS 1 /* 21 ms to 42ms */ +#define BL_42MS 2 /* 42 ms to 84ms */ +#define BL_84MS 3 /* 84 ms to 170ms */ +#define BL_170MS 4 /* 170 ms to340ms */ +#define BL_340MS 5 /* 340 ms to670ms */ +#define BL_670MS 6 /* 670 ms to 1.3s */ +#define BL_1300MS 7 /* 1.3s to 2.7s */ + +#define PHY_M_LEDC_F_INT (1<<11) /* Bit 11: Force Interrupt */ + +#define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4..3: Link Control */ +#define PHY_M_LEDC_DP_CTRL (1<<2) /* Bit 2: Duplex Control */ +#define PHY_M_LEDC_RX_CTRL (1<<1) /* Bit 1: Rx activity / Link */ +#define PHY_M_LEDC_TX_CTRL (1<<0) /* Bit 0: Tx activity / Link */ + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) SHIFT6(x) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) SHIFT4(x) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) SHIFT2(x) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) SHIFT0(x) /* Bit 1.. 0: Tx */ + +#define MO_LED_NORM 0 +#define MO_LED_BLINK 1 +#define MO_LED_OFF 2 +#define MO_LED_ON 3 + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ + /* Bit 15.. 7: reserved */ +#define PHY_M_EC2_FI_IMPED (1<<6) /* Bit 6: Fiber Input Impedance */ +#define PHY_M_EC2_FO_IMPED (1<<5) /* Bit 5: Fiber Output Impedance */ +#define PHY_M_EC2_FO_M_CLK (1<<4) /* Bit 4: Fiber Mode Clock Enable */ +#define PHY_M_EC2_FO_BOOST (1<<3) /* Bit 3: Fiber Output Boost */ +#define PHY_M_EC2_FO_AM_MSK 7 /* Bit 2.. 0: Fiber Output Amplitude */ + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +#define PHY_M_CABD_ENA_TEST (1<<15) /* Bit 15: Enable Test */ +#define PHY_M_CABD_STAT_MSK (3<<13) /* Bit 14..13: Status */ + /* Bit 12.. 8: reserved */ +#define PHY_M_CABD_DIST_MSK 0xff /* Bit 7.. 0: Distance */ + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +#define CABD_STAT_NORMAL 0 +#define CABD_STAT_SHORT 1 +#define CABD_STAT_OPEN 2 +#define CABD_STAT_FAIL 3 + + +/* + * GMAC registers + * + * The GMAC registers are 16 or 32 bits wide. + * The GMACs host processor interface is 16 bits wide, + * therefore ALL registers will be addressed with 16 bit accesses. + * + * The following macros are provided to access the GMAC registers + * GM_IN16(), GM_OUT16, GM_IN32(), GM_OUT32(), GM_INADR(), GM_OUTADR(), + * GM_INHASH(), and GM_OUTHASH(). + * The macros are defined in SkGeHw.h. + * + * Note: NA reg = Network Address e.g DA, SA etc. + * + */ + +/* Port Registers */ +#define GM_GP_STAT 0x0000 /* 16 bit r/o General Purpose Status */ +#define GM_GP_CTRL 0x0004 /* 16 bit r/w General Purpose Control */ +#define GM_TX_CTRL 0x0008 /* 16 bit r/w Transmit Control Reg. */ +#define GM_RX_CTRL 0x000c /* 16 bit r/w Receive Control Reg. 
*/ +#define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow Control */ +#define GM_TX_PARAM 0x0014 /* 16 bit r/w Transmit Parameter Reg. */ +#define GM_SERIAL_MODE 0x0018 /* 16 bit r/w Serial Mode Register */ + +/* Source Address Registers */ +#define GM_SRC_ADDR_1L 0x001c /* 16 bit r/w Source Address 1 (low) */ +#define GM_SRC_ADDR_1M 0x0020 /* 16 bit r/w Source Address 1 (middle) */ +#define GM_SRC_ADDR_1H 0x0024 /* 16 bit r/w Source Address 1 (high) */ +#define GM_SRC_ADDR_2L 0x0028 /* 16 bit r/w Source Address 2 (low) */ +#define GM_SRC_ADDR_2M 0x002c /* 16 bit r/w Source Address 2 (middle) */ +#define GM_SRC_ADDR_2H 0x0030 /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ +#define GM_MC_ADDR_H1 0x0034 /* 16 bit r/w Multicast Address Hash 1 */ +#define GM_MC_ADDR_H2 0x0038 /* 16 bit r/w Multicast Address Hash 2 */ +#define GM_MC_ADDR_H3 0x003c /* 16 bit r/w Multicast Address Hash 3 */ +#define GM_MC_ADDR_H4 0x0040 /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ +#define GM_TX_IRQ_SRC 0x0044 /* 16 bit r/o Tx Overflow IRQ Source */ +#define GM_RX_IRQ_SRC 0x0048 /* 16 bit r/o Rx Overflow IRQ Source */ +#define GM_TR_IRQ_SRC 0x004c /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ +#define GM_TX_IRQ_MSK 0x0050 /* 16 bit r/w Tx Overflow IRQ Mask */ +#define GM_RX_IRQ_MSK 0x0054 /* 16 bit r/w Rx Overflow IRQ Mask */ +#define GM_TR_IRQ_MSK 0x0058 /* 16 bit r/w Tx/Rx Over. IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ +#define GM_SMI_CTRL 0x0080 /* 16 bit r/w SMI Control Register */ +#define GM_SMI_DATA 0x0084 /* 16 bit r/w SMI Data Register */ +#define GM_PHY_ADDR 0x0088 /* 16 bit r/w GPHY Address Register */ + +/* MIB Counters */ +#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ +#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +#define GM_RXF_UC_OK \ + (GM_MIB_CNT_BASE + 0) /* Unicast Frames Received OK */ +#define GM_RXF_BC_OK \ + (GM_MIB_CNT_BASE + 8) /* Broadcast Frames Received OK */ +#define GM_RXF_MPAUSE \ + (GM_MIB_CNT_BASE + 16) /* Pause MAC Ctrl Frames Received */ +#define GM_RXF_MC_OK \ + (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */ +#define GM_RXF_FCS_ERR \ + (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. 
Error */ + /* GM_MIB_CNT_BASE + 40: reserved */ +#define GM_RXO_OK_LO \ + (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */ +#define GM_RXO_OK_HI \ + (GM_MIB_CNT_BASE + 56) /* Octets Received OK High */ +#define GM_RXO_ERR_LO \ + (GM_MIB_CNT_BASE + 64) /* Octets Received Invalid Low */ +#define GM_RXO_ERR_HI \ + (GM_MIB_CNT_BASE + 72) /* Octets Received Invalid High */ +#define GM_RXF_SHT \ + (GM_MIB_CNT_BASE + 80) /* Frames <64 Byte Received OK */ +#define GM_RXE_FRAG \ + (GM_MIB_CNT_BASE + 88) /* Frames <64 Byte Receeived with FCS Err */ +#define GM_RXF_64B \ + (GM_MIB_CNT_BASE + 96) /* 64 Byte Rx Frame */ +#define GM_RXF_127B \ + (GM_MIB_CNT_BASE + 104) /* 65-127 Byte Rx Frame */ +#define GM_RXF_255B \ + (GM_MIB_CNT_BASE + 112) /* 128-255 Byte Rx Frame */ +#define GM_RXF_511B \ + (GM_MIB_CNT_BASE + 120) /* 256-511 Byte Rx Frame */ +#define GM_RXF_1023B \ + (GM_MIB_CNT_BASE + 128) /* 512-1023 Byte Rx Frame */ +#define GM_RXF_1518B \ + (GM_MIB_CNT_BASE + 136) /* 1024-1518 Byte Rx Frame */ +#define GM_RXF_MAX_SZ \ + (GM_MIB_CNT_BASE + 144) /* 1519-MaxSize Byte Rx Frame */ +#define GM_RXF_LNG_ERR \ + (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */ +#define GM_RXF_JAB_PKT \ + (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */ + /* GM_MIB_CNT_BASE + 168: reserved */ +#define GM_RXE_FIFO_OV \ + (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */ + /* GM_MIB_CNT_BASE + 184: reserved */ +#define GM_TXF_UC_OK \ + (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */ +#define GM_TXF_BC_OK \ + (GM_MIB_CNT_BASE + 200) /* Broadcast Frames Xmitted OK */ +#define GM_TXF_MPAUSE \ + (GM_MIB_CNT_BASE + 208) /* Pause MAC Ctrl Frames Xmitted */ +#define GM_TXF_MC_OK \ + (GM_MIB_CNT_BASE + 216) /* Multicast Frames Xmitted OK */ +#define GM_TXO_OK_LO \ + (GM_MIB_CNT_BASE + 224) /* Octets Transmitted OK Low */ +#define GM_TXO_OK_HI \ + (GM_MIB_CNT_BASE + 232) /* Octets Transmitted OK High */ +#define GM_TXF_64B \ + (GM_MIB_CNT_BASE + 240) /* 64 Byte Tx Frame */ +#define GM_TXF_127B \ + (GM_MIB_CNT_BASE + 248) /* 65-127 Byte Tx Frame */ +#define GM_TXF_255B \ + (GM_MIB_CNT_BASE + 256) /* 128-255 Byte Tx Frame */ +#define GM_TXF_511B \ + (GM_MIB_CNT_BASE + 264) /* 256-511 Byte Tx Frame */ +#define GM_TXF_1023B \ + (GM_MIB_CNT_BASE + 272) /* 512-1023 Byte Tx Frame */ +#define GM_TXF_1518B \ + (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */ +#define GM_TXF_MAX_SZ \ + (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */ + /* GM_MIB_CNT_BASE + 296: reserved */ +#define GM_TXF_COL \ + (GM_MIB_CNT_BASE + 304) /* Tx Collision */ +#define GM_TXF_LAT_COL \ + (GM_MIB_CNT_BASE + 312) /* Tx Late Collision */ +#define GM_TXF_ABO_COL \ + (GM_MIB_CNT_BASE + 320) /* Tx aborted due to Exces. Col. */ +#define GM_TXF_MUL_COL \ + (GM_MIB_CNT_BASE + 328) /* Tx Multiple Collision */ +#define GM_TXF_SNG_COL \ + (GM_MIB_CNT_BASE + 336) /* Tx Single Collision */ +#define GM_TXE_FIFO_UR \ + (GM_MIB_CNT_BASE + 344) /* Tx FIFO Underrun Event */ + +/*----------------------------------------------------------------------------*/ +/* + * GMAC Bit Definitions + * + * If the bit access behaviour differs from the register access behaviour + * (r/w, r/o) this is documented after the bit number. 
+ * The following bit access behaviours are used: + * (sc) self clearing + * (r/o) read only + */ + +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ + +#define GM_GPSR_SPEED (1<<15) /* Bit 15: Port Speed (1 = 100 Mbps) */ +#define GM_GPSR_DUPLEX (1<<14) /* Bit 14: Duplex Mode (1 = Full) */ +#define GM_GPSR_FC_TX_DIS (1<<13) /* Bit 13: Tx Flow Control Mode Disabled */ +#define GM_GPSR_LINK_UP (1<<12) /* Bit 12: Link Up Status */ +#define GM_GPSR_PAUSE (1<<11) /* Bit 11: Pause State */ +#define GM_GPSR_TX_ACTIVE (1<<10) /* Bit 10: Tx in Progress */ +#define GM_GPSR_EXC_COL (1<<9) /* Bit 9: Excessive Collisions Occured */ +#define GM_GPSR_LAT_COL (1<<8) /* Bit 8: Late Collisions Occured */ + /* Bit 7..6: reserved */ +#define GM_GPSR_PHY_ST_CH (1<<5) /* Bit 5: PHY Status Change */ +#define GM_GPSR_GIG_SPEED (1<<4) /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ +#define GM_GPSR_PART_MODE (1<<3) /* Bit 3: Partition mode */ +#define GM_GPSR_FC_RX_DIS (1<<2) /* Bit 2: Rx Flow Control Mode Disabled */ +#define GM_GPSR_PROM_EN (1<<1) /* Bit 1: Promiscuous Mode Enabled */ + /* Bit 0: reserved */ + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ + /* Bit 15: reserved */ +#define GM_GPCR_PROM_ENA (1<<14) /* Bit 14: Enable Promiscuous Mode */ +#define GM_GPCR_FC_TX_DIS (1<<13) /* Bit 13: Disable Tx Flow Control Mode */ +#define GM_GPCR_TX_ENA (1<<12) /* Bit 12: Enable Transmit */ +#define GM_GPCR_RX_ENA (1<<11) /* Bit 11: Enable Receive */ +#define GM_GPCR_BURST_ENA (1<<10) /* Bit 10: Enable Burst Mode */ +#define GM_GPCR_LOOP_ENA (1<<9) /* Bit 9: Enable MAC Loopback Mode */ +#define GM_GPCR_PART_ENA (1<<8) /* Bit 8: Enable Partition Mode */ +#define GM_GPCR_GIGS_ENA (1<<7) /* Bit 7: Gigabit Speed (1000 Mbps) */ +#define GM_GPCR_FL_PASS (1<<6) /* Bit 6: Force Link Pass */ +#define GM_GPCR_DUP_FULL (1<<5) /* Bit 5: Full Duplex Mode */ +#define GM_GPCR_FC_RX_DIS (1<<4) /* Bit 4: Disable Rx Flow Control Mode */ +#define GM_GPCR_SPEED_100 (1<<3) /* Bit 3: Port Speed 100 Mbps */ +#define GM_GPCR_AU_DUP_DIS (1<<2) /* Bit 2: Disable Auto-Update for Duplex */ +#define GM_GPCR_AU_FCT_DIS (1<<1) /* Bit 1: Disable Auto-Update for Flow-c. 
*/ +#define GM_GPCR_AU_SPD_DIS (1<<0) /* Bit 0: Disable Auto-Update for Speed */ + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\ + GM_GPCR_AU_SPD_DIS) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ + +#define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */ +#define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */ +#define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */ +#define GM_TXCR_COL_THR (4<<10) /* Bit 12..10: Collision Threshold */ + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +#define GM_RXCR_UCF_ENA (1<<15) /* Bit 15: Enable Unicast filtering */ +#define GM_RXCR_MCF_ENA (1<<14) /* Bit 14: Enable Multicast filtering */ +#define GM_RXCR_CRC_DIS (1<<13) /* Bit 13: Remove 4-byte CRC */ +#define GM_RXCR_PASS_FC (1<<12) /* Bit 12: Pass FC packets to FIFO */ + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +#define GM_TXPA_JAMLEN_MSK (0x03<<14) /* Bit 15..14: Jam Length */ +#define GM_TXPA_JAMIPG_MSK (0x1f<<9) /* Bit 13..9: Jam IPG */ +#define GM_TXPA_JAMDAT_MSK (0x1f<<4) /* Bit 8..4: IPG Jam to Data */ + /* Bit 3..0: reserved */ +#define JAM_LEN_VAL(x) SHIFT14(x) +#define JAM_IPG_VAL(x) SHIFT9(x) +#define IPG_JAM_DATA(x) SHIFT4(x) + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +#define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder */ +#define GM_SMOD_LIMIT_4 (1<<10) /* Bit 10: 4 consecutive transmit trials */ +#define GM_SMOD_VLAN_ENA (1<<9) /* Bit 9: Enable VLAN (Max. Frame Length) */ +#define GM_SMOD_JUMBO_ENA (1<<8) /* Bit 8: Enable Jumbo (Max. Frame Length) */ + /* Bit 7..5: reserved */ +#define GM_SMOD_IPG_MSK 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ + +#define DATA_BLIND_VAL(x) SHIFT11(x) +#define DATA_BLIND_FAST_ETH 0x1c +#define DATA_BLIND_GIGABIT 4 + +#define IPG_VAL_FAST_ETH 0x1e +#define IPG_VAL_GIGABIT 6 + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ + +#define GM_SMI_CT_PHY_AD(x) SHIFT11(x) +#define GM_SMI_CT_REG_AD(x) SHIFT6(x) +#define GM_SMI_CT_OP_RD (1<<5) /* Bit 5: OpCode Read (0=Write)*/ +#define GM_SMI_CT_RD_VAL (1<<4) /* Bit 4: Read Valid (Read completed) */ +#define GM_SMI_CT_BUSY (1<<3) /* Bit 3: Busy (Operation in progress) */ + /* Bit 2..0: reserved */ + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ + /* Bit 15..6: reserved */ +#define GM_PAR_MIB_CLR (1<<5) /* Bit 5: Set MIB Clear Counter Mode */ +#define GM_PAR_MIB_TST (1<<4) /* Bit 4: MIB Load Counter (Test Mode) */ + /* Bit 3..0: reserved */ + +/* Receive Frame Status Encoding */ +#define GMR_FS_LEN (0xffffUL<<16) /* Bit 31..16: Rx Frame Length */ + /* Bit 15..14: reserved */ +#define GMR_FS_VLAN (1L<<13) /* Bit 13: VLAN Packet */ +#define GMR_FS_JABBER (1L<<12) /* Bit 12: Jabber Packet */ +#define GMR_FS_UN_SIZE (1L<<11) /* Bit 11: Undersize Packet */ +#define GMR_FS_MC (1L<<10) /* Bit 10: Multicast Packet */ +#define GMR_FS_BC (1L<<9) /* Bit 9: Broadcast Packet */ +#define GMR_FS_RX_OK (1L<<8) /* Bit 8: Receive OK (Good Packet) */ +#define GMR_FS_GOOD_FC (1L<<7) /* Bit 7: Good Flow-Control Packet */ +#define GMR_FS_BAD_FC (1L<<6) /* Bit 6: Bad Flow-Control Packet */ +#define GMR_FS_MII_ERR (1L<<5) /* Bit 5: MII Error */ +#define GMR_FS_LONG_ERR (1L<<4) /* Bit 4: Too Long Packet */ +#define GMR_FS_FRAGMENT (1L<<3) /* Bit 3: Fragment */ + /* Bit 2: reserved */ +#define GMR_FS_CRC_ERR (1L<<1) /* Bit 1: CRC Error */ +#define GMR_FS_RX_FF_OV (1L<<0) /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR 
(analogous to XMR_FS_ANY_ERR) + */ +#define GMR_FS_ANY_ERR (GMR_FS_CRC_ERR | \ + GMR_FS_LONG_ERR | \ + GMR_FS_MII_ERR | \ + GMR_FS_BAD_FC | \ + GMR_FS_GOOD_FC | \ + GMR_FS_JABBER) + +/* Rx GMAC FIFO Flush Mask (default) */ +#define RX_FF_FL_DEF_MSK (GMR_FS_CRC_ERR | \ + GMR_FS_RX_FF_OV | \ + GMR_FS_MII_ERR | \ + GMR_FS_BAD_FC | \ + GMR_FS_GOOD_FC | \ + GMR_FS_UN_SIZE | \ + GMR_FS_JABBER) /* typedefs *******************************************************************/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skaddr.c linux.21pre4-ac2/drivers/net/sk98lin/skaddr.c --- linux.21pre4/drivers/net/sk98lin/skaddr.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skaddr.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skaddr.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.40 $ - * Date: $Date: 2001/02/14 14:04:59 $ + * Version: $Revision: 1.47 $ + * Date: $Date: 2002/09/17 06:31:10 $ * Purpose: Manage Addresses (Multicast and Unicast) and Promiscuous Mode. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,39 @@ * History: * * $Log: skaddr.c,v $ + * Revision 1.47 2002/09/17 06:31:10 tschilli + * Handling of SK_PROM_MODE_ALL_MC flag in SkAddrGmacMcUpdate() + * and SkAddrGmacPromiscuousChange() fixed. + * Editorial changes. + * + * Revision 1.46 2002/08/22 07:55:41 tschilli + * New function SkGmacMcHash() for GMAC multicast hashing algorithm added. + * Editorial changes. + * + * Revision 1.45 2002/08/15 12:29:35 tschilli + * SkAddrGmacMcUpdate() and SkAddrGmacPromiscuousChange() changed. + * + * Revision 1.44 2002/08/14 12:18:03 rschmidt + * Replaced direct handling of MAC Hashing (XMAC and GMAC) + * with routine SkMacHashing(). + * Replaced wrong 3rd para 'i' with 'PortNumber' in SkMacPromiscMode(). + * + * Revision 1.43 2002/08/13 09:37:43 rschmidt + * Corrected some SK_DBG_MSG outputs. + * Replaced wrong 2nd para pAC with IoC in SkMacPromiscMode(). + * Editorial changes. + * + * Revision 1.42 2002/08/12 11:24:36 rschmidt + * Remove setting of logical MAC address GM_SRC_ADDR_2 in SkAddrInit(). + * Replaced direct handling of MAC Promiscuous Mode (XMAC and GMAC) + * with routine SkMacPromiscMode(). + * Editorial changes. + * + * Revision 1.41 2002/06/10 13:52:18 tschilli + * Changes for handling YUKON. + * All changes are internally and not visible to the programmer + * using this module. + * * Revision 1.40 2001/02/14 14:04:59 rassmann * Editorial changes. * @@ -166,13 +199,13 @@ * Description: * * This module is intended to manage multicast addresses, address override, - * and promiscuous mode on GEnesis adapters. + * and promiscuous mode on GEnesis and Yukon adapters. 
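A stand-alone sketch, not part of the patch: the Receive Frame Status encoding and the GMR_FS_ANY_ERR mask added in the skgehw.h hunk above describe a single 32-bit status word per received frame (frame length in bits 31..16, error flags below). The decode below copies the mask values from that hunk; the sample status word is invented.

/* Stand-alone illustration of the Rx frame status layout added above.
 * Mask values are copied from the hunk; the status word below is an
 * invented example, not real hardware output. */
#include <stdio.h>

#define GMR_FS_LEN      (0xffffUL << 16) /* Bit 31..16: Rx Frame Length */
#define GMR_FS_JABBER   (1UL << 12)
#define GMR_FS_RX_OK    (1UL << 8)
#define GMR_FS_GOOD_FC  (1UL << 7)
#define GMR_FS_BAD_FC   (1UL << 6)
#define GMR_FS_MII_ERR  (1UL << 5)
#define GMR_FS_LONG_ERR (1UL << 4)
#define GMR_FS_CRC_ERR  (1UL << 1)

/* Same combination as GMR_FS_ANY_ERR in the hunk. */
#define GMR_FS_ANY_ERR  (GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | GMR_FS_MII_ERR | \
                         GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_JABBER)

int main(void)
{
	unsigned long status = (64UL << 16) | GMR_FS_RX_OK; /* invented sample */
	unsigned len = (unsigned)((status & GMR_FS_LEN) >> 16);

	if ((status & GMR_FS_ANY_ERR) != 0 || (status & GMR_FS_RX_OK) == 0)
		printf("drop frame, status 0x%08lx\n", status);
	else
		printf("good frame, %u bytes\n", len);
	return 0;
}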
* * Address Layout: * port address: physical MAC address - * 1st exact match: logical MAC address - * 2nd exact match: RLMT multicast - * exact match 3-13: OS-specific multicasts + * 1st exact match: logical MAC address (GEnesis only) + * 2nd exact match: RLMT multicast (GEnesis only) + * exact match 3-13: OS-specific multicasts (GEnesis only) * * Include File Hierarchy: * @@ -183,7 +216,7 @@ #ifndef lint static const char SysKonnectFileId[] = - "@(#) $Id: skaddr.c,v 1.40 2001/02/14 14:04:59 rassmann Exp $ (C) SysKonnect."; + "@(#) $Id: skaddr.c,v 1.47 2002/09/17 06:31:10 tschilli Exp $ (C) SysKonnect."; #endif /* !defined(lint) */ #define __SKADDR_C @@ -199,6 +232,8 @@ /* defines ********************************************************************/ +#define XMAC_POLY 0xEDB88320UL /* CRC32-Poly - XMAC: Little Endian */ +#define GMAC_POLY 0x04C11DB7L /* CRC16-Poly - GMAC: Little Endian */ #define HASH_BITS 6 /* #bits in hash */ #define SK_MC_BIT 0x01 @@ -247,7 +282,7 @@ * All permanent MAC addresses are read from EPROM. * If the current MAC addresses are not already set in software, * they are set to the values of the permanent addresses. - * The current addresses are written to the corresponding XMAC. + * The current addresses are written to the corresponding MAC. * * * SK_INIT_RUN @@ -266,38 +301,26 @@ SK_IOC IoC, /* I/O context */ int Level) /* initialization level */ { - int j; - SK_U32 i; - SK_U8 *InAddr; - SK_U16 *OutAddr; + int j; + SK_U32 i; + SK_U8 *InAddr; + SK_U16 *OutAddr; SK_ADDR_PORT *pAPort; switch (Level) { case SK_INIT_DATA: - SK_MEMSET((char *)&pAC->Addr, 0, sizeof(SK_ADDR)); + SK_MEMSET((char *) &pAC->Addr, 0, sizeof(SK_ADDR)); for (i = 0; i < SK_MAX_MACS; i++) { pAPort = &pAC->Addr.Port[i]; pAPort->PromMode = SK_PROM_MODE_NONE; - - pAPort->FirstExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; - pAPort->FirstExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; - pAPort->NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; - pAPort->NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; - -#if 0 - /* Don't do this here ... */ - - /* Reset Promiscuous mode. */ - (void)SkAddrPromiscuousChange( - pAC, - IoC, - i, - SK_PROM_MODE_NONE); -#endif /* 0 */ + + pAPort->FirstExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; + pAPort->FirstExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; + pAPort->NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; + pAPort->NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV; } - -#ifdef DEBUG +#ifdef xDEBUG for (i = 0; i < SK_MAX_MACS; i++) { if (pAC->Addr.Port[i].NextExactMatchRlmt < SK_ADDR_FIRST_MATCH_RLMT) { @@ -305,7 +328,6 @@ } } #endif /* DEBUG */ - /* pAC->Addr.InitDone = SK_INIT_DATA; */ break; @@ -313,8 +335,7 @@ for (i = 0; i < SK_MAX_NETS; i++) { pAC->Addr.Net[i].ActivePort = pAC->Rlmt.Net[i].ActivePort; } - -#ifdef DEBUG +#ifdef xDEBUG for (i = 0; i < SK_MAX_MACS; i++) { if (pAC->Addr.Port[i].NextExactMatchRlmt < SK_ADDR_FIRST_MATCH_RLMT) { @@ -322,10 +343,10 @@ } } #endif /* DEBUG */ - + /* Read permanent logical MAC address from Control Register File. */ for (j = 0; j < SK_MAC_ADDR_LEN; j++) { - InAddr = (SK_U8 *)&pAC->Addr.Net[0].PermanentMacAddress.a[j]; + InAddr = (SK_U8 *) &pAC->Addr.Net[0].PermanentMacAddress.a[j]; SK_IN8(IoC, B2_MAC_1 + j, InAddr); } @@ -339,7 +360,6 @@ /* Set the current logical MAC address. */ pAC->Addr.Port[pAC->Addr.Net[0].ActivePort].Exact[0] = pAC->Addr.Net[0].CurrentMacAddress; - #if SK_MAX_NETS > 1 /* Set logical MAC address for net 2 to (log | 3). 
*/ if (!pAC->Addr.Net[1].CurrentMacAddressSet) { @@ -353,43 +373,36 @@ } #endif /* SK_MAX_NETS > 1 */ -#ifdef xDEBUG - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_INIT, - ("Permanent MAC Address: %02X %02X %02X %02X %02X %02X\n", - pAC->Addr.PermanentMacAddress.a[0], - pAC->Addr.PermanentMacAddress.a[1], - pAC->Addr.PermanentMacAddress.a[2], - pAC->Addr.PermanentMacAddress.a[3], - pAC->Addr.PermanentMacAddress.a[4], - pAC->Addr.PermanentMacAddress.a[5])) - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_INIT, - ("Logical MAC Address: %02X %02X %02X %02X %02X %02X\n", - pAC->Addr.CurrentMacAddress.a[0], - pAC->Addr.CurrentMacAddress.a[1], - pAC->Addr.CurrentMacAddress.a[2], - pAC->Addr.CurrentMacAddress.a[3], - pAC->Addr.CurrentMacAddress.a[4], - pAC->Addr.CurrentMacAddress.a[5])) +#ifdef DEBUG + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("Permanent MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n", + i, + pAC->Addr.Net[i].PermanentMacAddress.a[0], + pAC->Addr.Net[i].PermanentMacAddress.a[1], + pAC->Addr.Net[i].PermanentMacAddress.a[2], + pAC->Addr.Net[i].PermanentMacAddress.a[3], + pAC->Addr.Net[i].PermanentMacAddress.a[4], + pAC->Addr.Net[i].PermanentMacAddress.a[5])) + + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("Logical MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n", + i, + pAC->Addr.Net[i].CurrentMacAddress.a[0], + pAC->Addr.Net[i].CurrentMacAddress.a[1], + pAC->Addr.Net[i].CurrentMacAddress.a[2], + pAC->Addr.Net[i].CurrentMacAddress.a[3], + pAC->Addr.Net[i].CurrentMacAddress.a[4], + pAC->Addr.Net[i].CurrentMacAddress.a[5])) + } #endif /* DEBUG */ -#if 0 - /* Don't do this here ... */ - - (void)SkAddrMcUpdate(pAC, IoC, pAC->Addr.ActivePort); -#endif /* 0 */ - - for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { pAPort = &pAC->Addr.Port[i]; /* Read permanent port addresses from Control Register File. */ for (j = 0; j < SK_MAC_ADDR_LEN; j++) { - InAddr = (SK_U8 *)&pAPort->PermanentMacAddress.a[j]; + InAddr = (SK_U8 *) &pAPort->PermanentMacAddress.a[j]; SK_IN8(IoC, B2_MAC_2 + 8 * i + j, InAddr); } @@ -403,27 +416,27 @@ pAPort->CurrentMacAddressSet = SK_TRUE; } - /* Set port's current MAC addresses. */ - OutAddr = (SK_U16 *)&pAPort->CurrentMacAddress.a[0]; - XM_OUTADDR(IoC, i, XM_SA, OutAddr); - -#ifdef xDEBUG - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_INIT, - ("Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + /* Set port's current physical MAC address. 
*/ + OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + XM_OUTADDR(IoC, i, XM_SA, OutAddr); + } + else { + GM_OUTADDR(IoC, i, GM_SRC_ADDR_1L, OutAddr); + } +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("SkAddrInit: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", pAPort->PermanentMacAddress.a[0], pAPort->PermanentMacAddress.a[1], pAPort->PermanentMacAddress.a[2], pAPort->PermanentMacAddress.a[3], pAPort->PermanentMacAddress.a[4], pAPort->PermanentMacAddress.a[5])) - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_INIT, - ("Phsical MAC Address: %02X %02X %02X %02X %02X %02X\n", + + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT, + ("SkAddrInit: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", pAPort->CurrentMacAddress.a[0], pAPort->CurrentMacAddress.a[1], pAPort->CurrentMacAddress.a[2], @@ -436,7 +449,7 @@ break; case SK_INIT_RUN: -#ifdef DEBUG +#ifdef xDEBUG for (i = 0; i < SK_MAX_MACS; i++) { if (pAC->Addr.Port[i].NextExactMatchRlmt < SK_ADDR_FIRST_MATCH_RLMT) { @@ -453,6 +466,7 @@ } return (SK_ADDR_SUCCESS); + } /* SkAddrInit */ @@ -461,11 +475,14 @@ * SkAddrMcClear - clear the multicast table * * Description: - * This routine clears the multicast table - * (either entry 2 or entries 3-16 and InexactFilter) of the given port. + * This routine clears the multicast table. + * * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated * immediately. * + * It calls either SkAddrXmacMcClear or SkAddrGmacMcClear, according + * to the adapter in use. The real work is done there. + * * Context: * runtime, pageable * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY @@ -481,21 +498,59 @@ SK_U32 PortNumber, /* Index of affected port */ int Flags) /* permanent/non-perm, sw-only */ { - int i; - - if (PortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { + int ReturnCode; + + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ReturnCode = SkAddrXmacMcClear(pAC, IoC, PortNumber, Flags); + } + else { + ReturnCode = SkAddrGmacMcClear(pAC, IoC, PortNumber, Flags); + } + + return (ReturnCode); + +} /* SkAddrMcClear */ + + +/****************************************************************************** + * + * SkAddrXmacMcClear - clear the multicast table + * + * Description: + * This routine clears the multicast table + * (either entry 2 or entries 3-16 and InexactFilter) of the given port. + * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated + * immediately. + * + * Context: + * runtime, pageable + * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY + * may be called after SK_INIT_IO without limitation + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrXmacMcClear( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Index of affected port */ +int Flags) /* permanent/non-perm, sw-only */ +{ + int i; - if (Flags & SK_ADDR_PERMANENT) { + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ /* Clear RLMT multicast addresses. */ pAC->Addr.Port[PortNumber].NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT; } else { /* not permanent => DRV */ - /* Clear InexactFilter. 
*/ - + /* Clear InexactFilter */ for (i = 0; i < 8; i++) { pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0; } @@ -506,20 +561,114 @@ } if (!(Flags & SK_MC_SW_ONLY)) { - (void)SkAddrMcUpdate(pAC, IoC, PortNumber); + (void) SkAddrXmacMcUpdate(pAC, IoC, PortNumber); } return (SK_ADDR_SUCCESS); -} /* SkAddrMcClear */ + +} /* SkAddrXmacMcClear */ + + +/****************************************************************************** + * + * SkAddrGmacMcClear - clear the multicast table + * + * Description: + * This routine clears the multicast hashing table (InexactFilter) + * (either the RLMT or the driver bits) of the given port. + * + * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated + * immediately. + * + * Context: + * runtime, pageable + * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY + * may be called after SK_INIT_IO without limitation + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrGmacMcClear( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Index of affected port */ +int Flags) /* permanent/non-perm, sw-only */ +{ + int i; + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactFilter (not cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7])) +#endif /* DEBUG */ + + /* Clear InexactFilter */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0; + } + + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ + + /* Copy DRV bits to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i]; + + /* Clear InexactRlmtFilter. */ + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i] = 0; + + } + } + else { /* not permanent => DRV */ + + /* Copy RLMT bits to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i]; + + /* Clear InexactDrvFilter. */ + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i] = 0; + } + } + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactFilter (cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7])) +#endif /* DEBUG */ + + if (!(Flags & SK_MC_SW_ONLY)) { + (void) SkAddrGmacMcUpdate(pAC, IoC, PortNumber); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrGmacMcClear */ #ifndef SK_ADDR_CHEAT /****************************************************************************** * - * SkCrc32McHash - hash multicast address + * SkXmacMcHash - hash multicast address * * Description: * This routine computes the hash value for a multicast address. 
+ * A CRC32 algorithm is used. * * Notes: * The code was adapted from the XaQti data sheet. @@ -530,15 +679,84 @@ * Returns: * Hash value of multicast address. */ -unsigned SkCrc32McHash( +SK_U32 SkXmacMcHash( unsigned char *pMc) /* Multicast address */ { - u32 Crc; + SK_U32 Idx; + SK_U32 Bit; + SK_U32 Data; + SK_U32 Crc; + + Crc = 0xFFFFFFFFUL; + for (Idx = 0; Idx < SK_MAC_ADDR_LEN; Idx++) { + Data = *pMc++; + for (Bit = 0; Bit < 8; Bit++, Data >>= 1) { + Crc = (Crc >> 1) ^ (((Crc ^ Data) & 1) ? XMAC_POLY : 0); + } + } - Crc = ether_crc_le(SK_MAC_ADDR_LEN, pMc); + return (Crc & ((1 << HASH_BITS) - 1)); + +} /* SkXmacMcHash */ + +/****************************************************************************** + * + * SkGmacMcHash - hash multicast address + * + * Description: + * This routine computes the hash value for a multicast address. + * A CRC16 algorithm is used. + * + * Notes: + * + * + * Context: + * runtime, pageable + * + * Returns: + * Hash value of multicast address. + */ +SK_U32 SkGmacMcHash( +unsigned char *pMc) /* Multicast address */ +{ + SK_U32 Data; + SK_U32 TmpData; + SK_U32 Crc; + int Byte; + int Bit; + + Crc = 0xFFFFFFFFUL; + for (Byte = 0; Byte < 6; Byte++) { + /* Get next byte. */ + Data = (SK_U32) pMc[Byte]; + + /* Change bit order in byte. */ + TmpData = Data; + for (Bit = 0; Bit < 8; Bit++) { + if (TmpData & 1L) { + Data |= 1L << (7 - Bit); + } + else { + Data &= ~(1L << (7 - Bit)); + } + TmpData >>= 1; + } + + Crc ^= (Data << 24); + for (Bit = 0; Bit < 8; Bit++) { + if (Crc & 0x80000000) { + Crc = (Crc << 1) ^ GMAC_POLY; + } + else { + Crc <<= 1; + } + } + } + return (Crc & ((1 << HASH_BITS) - 1)); -} /* SkCrc32McHash */ + +} /* SkGmacMcHash */ #endif /* not SK_ADDR_CHEAT */ @@ -549,11 +767,57 @@ * Description: * This routine enables reception for a given address on the given port. * + * It calls either SkAddrXmacMcAdd or SkAddrGmacMcAdd, according to the + * adapter in use. The real work is done there. + * * Notes: * The return code is only valid for SK_PROM_MODE_NONE. * - * In the current version, only RLMT may add addresses to the non-active - * port. + * Context: + * runtime, pageable + * may be called after SK_INIT_DATA + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_MC_ILLEGAL_ADDRESS + * SK_MC_ILLEGAL_PORT + * SK_MC_RLMT_OVERFLOW + */ +int SkAddrMcAdd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR *pMc, /* multicast address to be added */ +int Flags) /* permanent/non-permanent */ +{ + int ReturnCode; + + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ReturnCode = SkAddrXmacMcAdd(pAC, IoC, PortNumber, pMc, Flags); + } + else { + ReturnCode = SkAddrGmacMcAdd(pAC, IoC, PortNumber, pMc, Flags); + } + + return (ReturnCode); + +} /* SkAddrMcAdd */ + + +/****************************************************************************** + * + * SkAddrXmacMcAdd - add a multicast address to a port + * + * Description: + * This routine enables reception for a given address on the given port. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. * * The multicast bit is only checked if there are no free exact match * entries. 
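A stand-alone cross-check, not part of the patch: the two hashing routines added above each reduce a 6-byte multicast address to a 6-bit index into the port's 64-bit InexactFilter; the SkAddrXmacMcAdd()/SkAddrGmacMcAdd() hunks then set bit (63 - hash) for the XMAC and bit (hash) for the GMAC. The host program below mirrors both routines so the resulting filter byte and mask can be checked off-target; the sample address and the program itself are illustrative only.

/* Host-side cross-check of the hashing added above (not part of the
 * patch).  Polynomials, HASH_BITS and the bit-placement rules are
 * taken from the hunks; the sample address is arbitrary. */
#include <stdio.h>
#include <stdint.h>

#define XMAC_POLY 0xEDB88320UL	/* polynomial used by SkXmacMcHash() */
#define GMAC_POLY 0x04C11DB7UL	/* polynomial used by SkGmacMcHash() */
#define HASH_BITS 6		/* 64-bit filter -> 6-bit index */

/* Mirrors SkXmacMcHash(): reflected CRC over the 6 address bytes. */
static unsigned XmacHash(const unsigned char *mc)
{
	uint32_t crc = 0xFFFFFFFFUL;
	int i, bit;

	for (i = 0; i < 6; i++) {
		uint32_t data = mc[i];
		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? XMAC_POLY : 0);
	}
	return crc & ((1 << HASH_BITS) - 1);
}

/* Mirrors SkGmacMcHash(): reverse each byte, then non-reflected CRC. */
static unsigned GmacHash(const unsigned char *mc)
{
	uint32_t crc = 0xFFFFFFFFUL;
	int i, bit;

	for (i = 0; i < 6; i++) {
		uint32_t data = mc[i], tmp = data;

		for (bit = 0; bit < 8; bit++) {	/* change bit order in byte */
			if (tmp & 1)
				data |= 1UL << (7 - bit);
			else
				data &= ~(1UL << (7 - bit));
			tmp >>= 1;
		}
		crc ^= data << 24;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80000000UL) ? (crc << 1) ^ GMAC_POLY
						   : crc << 1;
	}
	return crc & ((1 << HASH_BITS) - 1);
}

int main(void)
{
	unsigned char mc[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
	unsigned xbit = 63 - XmacHash(mc);	/* XMAC: bit 63 - hash */
	unsigned gbit = GmacHash(mc);		/* GMAC: bit = hash */

	printf("XMAC: InexactFilter.Bytes[%u] |= 0x%02X\n",
	       xbit / 8, 1 << (xbit % 8));
	printf("GMAC: InexactFilter.Bytes[%u] |= 0x%02X\n",
	       gbit / 8, 1 << (gbit % 8));
	return 0;
}

Building this with any C compiler and comparing against the driver's DEBUG filter dumps is a quick way to confirm which byte of the 8-byte filter a given group address lands in.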
@@ -566,28 +830,23 @@ * SK_MC_FILTERING_EXACT * SK_MC_FILTERING_INEXACT * SK_MC_ILLEGAL_ADDRESS - * SK_MC_ILLEGAL_PORT * SK_MC_RLMT_OVERFLOW */ -int SkAddrMcAdd( +int SkAddrXmacMcAdd( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* I/O context */ SK_U32 PortNumber, /* Port Number */ SK_MAC_ADDR *pMc, /* multicast address to be added */ -int Flags) /* permanent/non-permanent */ +int Flags) /* permanent/non-permanent */ { int i; SK_U8 Inexact; #ifndef SK_ADDR_CHEAT - unsigned HashBit; + SK_U32 HashBit; #endif /* !defined(SK_ADDR_CHEAT) */ - if (PortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { - return (SK_ADDR_ILLEGAL_PORT); - } - - if (Flags & SK_ADDR_PERMANENT) { -#ifdef DEBUG + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ +#ifdef xDEBUG if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt < SK_ADDR_FIRST_MATCH_RLMT) { Next0[PortNumber] |= 1; @@ -600,7 +859,7 @@ return (SK_MC_RLMT_OVERFLOW); } - /* Set an RLMT multicast address. */ + /* Set a RLMT multicast address. */ pAC->Addr.Port[PortNumber].Exact[ pAC->Addr.Port[PortNumber].NextExactMatchRlmt++] = *pMc; @@ -608,15 +867,7 @@ return (SK_MC_FILTERING_EXACT); } -#if 0 - /* Not PERMANENT => DRV */ - if (PortNumber != pAC->Addr.ActivePort) { - /* Only RLMT is allowed to do this. */ - return (SK_MC_ILLEGAL_PORT); - } -#endif /* 0 */ - -#ifdef DEBUG +#ifdef xDEBUG if (pAC->Addr.Port[PortNumber].NextExactMatchDrv < SK_ADDR_FIRST_MATCH_DRV) { Next0[PortNumber] |= 2; @@ -630,22 +881,19 @@ pAC->Addr.Port[PortNumber].Exact[ pAC->Addr.Port[PortNumber].NextExactMatchDrv++] = *pMc; - /* Clear InexactFilter. */ + /* Clear InexactFilter */ for (i = 0; i < 8; i++) { pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0; } } else { if (!(pMc->a[0] & SK_MC_BIT)) { - /* - * Hashing only possible with - * multicast addresses. - */ + /* Hashing only possible with multicast addresses. */ return (SK_MC_ILLEGAL_ADDRESS); } #ifndef SK_ADDR_CHEAT /* Compute hash value of address. */ - HashBit = 63 - SkCrc32McHash(&pMc->a[0]); + HashBit = 63 - SkXmacMcHash(&pMc->a[0]); /* Add bit to InexactFilter. */ pAC->Addr.Port[PortNumber].InexactFilter.Bytes[HashBit / 8] |= @@ -668,90 +916,223 @@ else { return (SK_MC_FILTERING_INEXACT); } -} /* SkAddrMcAdd */ + +} /* SkAddrXmacMcAdd */ /****************************************************************************** * - * SkAddrMcUpdate - update the HW MC address table and set the MAC address + * SkAddrGmacMcAdd - add a multicast address to a port * * Description: - * This routine enables reception of the addresses contained in a local - * table for a given port. - * It also programs the port's current physical MAC address. + * This routine enables reception for a given address on the given port. * * Notes: * The return code is only valid for SK_PROM_MODE_NONE. * * Context: * runtime, pageable - * may be called after SK_INIT_IO + * may be called after SK_INIT_DATA * * Returns: - * SK_MC_FILTERING_EXACT * SK_MC_FILTERING_INEXACT - * SK_ADDR_ILLEGAL_PORT + * SK_MC_ILLEGAL_ADDRESS */ -int SkAddrMcUpdate( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* I/O context */ -SK_U32 PortNumber) /* Port Number */ +int SkAddrGmacMcAdd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* Port Number */ +SK_MAC_ADDR *pMc, /* multicast address to be added */ +int Flags) /* permanent/non-permanent */ { - SK_U32 i; - SK_U8 Inexact; - SK_U16 *OutAddr; - SK_U16 LoMode; /* Lower 16 bits of XMAC Mode Reg. 
*/ - SK_ADDR_PORT *pAPort; - - if (PortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { - return (SK_ADDR_ILLEGAL_PORT); + int i; +#ifndef SK_ADDR_CHEAT + SK_U32 HashBit; +#endif /* !defined(SK_ADDR_CHEAT) */ + + if (!(pMc->a[0] & SK_MC_BIT)) { + /* Hashing only possible with multicast addresses. */ + return (SK_MC_ILLEGAL_ADDRESS); } - - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_CTRL, - ("SkAddrMcUpdate on Port %u.\n", PortNumber)) - pAPort = &pAC->Addr.Port[PortNumber]; - +#ifndef SK_ADDR_CHEAT + + /* Compute hash value of address. */ + HashBit = SkGmacMcHash(&pMc->a[0]); + + if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */ + + /* Add bit to InexactRlmtFilter. */ + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[HashBit / 8] |= + 1 << (HashBit % 8); + + /* Copy bit to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i]; + } #ifdef DEBUG - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_CTRL, - ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactRlmtFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[7])) #endif /* DEBUG */ - - /* Start with 0 to also program the logical MAC address. */ - for (i = 0; i < pAPort->NextExactMatchRlmt; i++) { - /* Set exact match address i on HW. */ - OutAddr = (SK_U16 *)&pAPort->Exact[i].a[0]; - XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr); + } + else { /* not permanent => DRV */ + + /* Add bit to InexactDrvFilter. */ + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[HashBit / 8] |= + 1 << (HashBit % 8); + + /* Copy bit to InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |= + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i]; + } +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("GMAC InexactDrvFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n", + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[0], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[1], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[2], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[3], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[4], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[5], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[6], + pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[7])) +#endif /* DEBUG */ + } + +#else /* SK_ADDR_CHEAT */ + + /* Set all bits in InexactFilter. */ + for (i = 0; i < 8; i++) { + pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF; + } +#endif /* SK_ADDR_CHEAT */ + + return (SK_MC_FILTERING_INEXACT); + +} /* SkAddrGmacMcAdd */ + + +/****************************************************************************** + * + * SkAddrMcUpdate - update the HW MC address table and set the MAC address + * + * Description: + * This routine enables reception of the addresses contained in a local + * table for a given port. + * It also programs the port's current physical MAC address. 
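A simplified model, not part of the patch: unlike the XMAC exact-match path, the GMAC code above never removes individual hash bits. It keeps separate InexactRlmtFilter and InexactDrvFilter bitmaps and regenerates the combined InexactFilter as their byte-wise OR, so clearing one owner's addresses leaves the other's intact. The sketch below uses invented type and function names to show just that bookkeeping.

/* Simplified model of the GMAC filter bookkeeping shown above.
 * Type, field and function names are illustrative only. */
#include <stdio.h>
#include <string.h>

struct filt {
	unsigned char rlmt[8];	/* hash bits owned by RLMT   */
	unsigned char drv[8];	/* hash bits owned by driver */
	unsigned char all[8];	/* combined filter written to hardware */
};

static void add_bit(unsigned char *bytes, unsigned hashbit)
{
	bytes[hashbit / 8] |= 1 << (hashbit % 8);	/* as in SkAddrGmacMcAdd() */
}

static void rebuild(struct filt *f)
{
	int i;
	for (i = 0; i < 8; i++)
		f->all[i] = f->rlmt[i] | f->drv[i];
}

static void clear_drv(struct filt *f)	/* analogous to SkAddrGmacMcClear() */
{
	memset(f->drv, 0, sizeof(f->drv));
	rebuild(f);
}

int main(void)
{
	struct filt f;
	memset(&f, 0, sizeof(f));

	add_bit(f.rlmt, 5);	/* RLMT multicast */
	add_bit(f.drv, 42);	/* OS multicast   */
	rebuild(f);
	printf("combined: byte0=%02X byte5=%02X\n", f.all[0], f.all[5]);

	clear_drv(f);		/* driver addresses dropped, RLMT bit survives */
	printf("after drv clear: byte0=%02X byte5=%02X\n", f.all[0], f.all[5]);
	return 0;
}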
+ * + * It calls either SkAddrXmacMcUpdate or SkAddrGmacMcUpdate, according + * to the adapter in use. The real work is done there. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrMcUpdate( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber) /* Port Number */ +{ + int ReturnCode; + + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ReturnCode = SkAddrXmacMcUpdate(pAC, IoC, PortNumber); + } + else { + ReturnCode = SkAddrGmacMcUpdate(pAC, IoC, PortNumber); + } + + return (ReturnCode); + +} /* SkAddrMcUpdate */ + + +/****************************************************************************** + * + * SkAddrXmacMcUpdate - update the HW MC address table and set the MAC address + * + * Description: + * This routine enables reception of the addresses contained in a local + * table for a given port. + * It also programs the port's current physical MAC address. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrXmacMcUpdate( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber) /* Port Number */ +{ + SK_U32 i; + SK_U8 Inexact; + SK_U16 *OutAddr; + SK_ADDR_PORT *pAPort; + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrXmacMcUpdate on Port %u.\n", PortNumber)) + + pAPort = &pAC->Addr.Port[PortNumber]; + +#ifdef DEBUG + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) +#endif /* DEBUG */ + + /* Start with 0 to also program the logical MAC address. */ + for (i = 0; i < pAPort->NextExactMatchRlmt; i++) { + /* Set exact match address i on XMAC */ + OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0]; + XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr); } - /* Clear other permanent exact match addresses on HW. */ + /* Clear other permanent exact match addresses on XMAC */ if (pAPort->NextExactMatchRlmt <= SK_ADDR_LAST_MATCH_RLMT) { - SkXmClrExactAddr( - pAC, - IoC, - PortNumber, - pAPort->NextExactMatchRlmt, + + SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchRlmt, SK_ADDR_LAST_MATCH_RLMT); } for (i = pAPort->FirstExactMatchDrv; i < pAPort->NextExactMatchDrv; i++) { - OutAddr = (SK_U16 *)&pAPort->Exact[i].a[0]; + OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0]; XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr); } - /* Clear other non-permanent exact match addresses on HW. */ + /* Clear other non-permanent exact match addresses on XMAC */ if (pAPort->NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) { - SkXmClrExactAddr( - pAC, - IoC, - PortNumber, - pAPort->NextExactMatchDrv, + + SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchDrv, SK_ADDR_LAST_MATCH_DRV); } @@ -760,36 +1141,33 @@ } if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) { + /* Set all bits in 64-bit hash register. */ XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash); - - /* Set bit 15 in mode register. 
*/ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - LoMode |= XM_MD_ENA_HSH; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); } else if (Inexact != 0) { + /* Set 64-bit hash register to InexactFilter. */ XM_OUTHASH(IoC, PortNumber, XM_HSM, &pAPort->InexactFilter.Bytes[0]); - - /* Set bit 15 in mode register. */ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - LoMode |= XM_MD_ENA_HSH; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); } else { - /* Clear bit 15 in mode register. */ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - LoMode &= ~XM_MD_ENA_HSH; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + /* Disable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_FALSE); } if (pAPort->PromMode != SK_PROM_MODE_NONE) { - (void)SkAddrPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); + (void) SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); } - /* Set port's current MAC address. */ - OutAddr = (SK_U16 *)&pAPort->CurrentMacAddress.a[0]; + /* Set port's current physical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; + XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr); #ifdef xDEBUG @@ -798,13 +1176,13 @@ SK_U16 *InAddr; /* Get exact match address i from port PortNumber. */ - InAddr = (SK_U16 *)&InAddr8[0]; + InAddr = (SK_U16 *) &InAddr8[0]; + XM_INADDR(IoC, PortNumber, XM_EXM(i), InAddr); - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_CTRL, - ("MC address %d on Port %u: %02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x.\n", + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrXmacMcUpdate: MC address %d on Port %u: ", + "%02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x\n", i, PortNumber, InAddr8[0], @@ -829,7 +1207,113 @@ else { return (SK_MC_FILTERING_INEXACT); } -} /* SkAddrMcUpdate */ + +} /* SkAddrXmacMcUpdate */ + + +/****************************************************************************** + * + * SkAddrGmacMcUpdate - update the HW MC address table and set the MAC address + * + * Description: + * This routine enables reception of the addresses contained in a local + * table for a given port. + * It also programs the port's current physical MAC address. + * + * Notes: + * The return code is only valid for SK_PROM_MODE_NONE. + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_MC_FILTERING_EXACT + * SK_MC_FILTERING_INEXACT + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrGmacMcUpdate( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber) /* Port Number */ +{ + SK_U32 i; + SK_U8 Inexact; + SK_U16 *OutAddr; + SK_ADDR_PORT *pAPort; + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrGmacMcUpdate on Port %u.\n", PortNumber)) + + pAPort = &pAC->Addr.Port[PortNumber]; + +#ifdef DEBUG + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber])) +#endif /* DEBUG */ + + for (Inexact = 0, i = 0; i < 8; i++) { + Inexact |= pAPort->InexactFilter.Bytes[i]; + } + + /* Set 64-bit hash register to InexactFilter. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, + &pAPort->InexactFilter.Bytes[0]); + + if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) { + + /* Set all bits in 64-bit hash register. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + } + else { + /* Enable Hashing. 
*/ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + } + + if (pAPort->PromMode != SK_PROM_MODE_NONE) { + (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode); + } + + /* Set port's current physical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0]; + GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr); + + /* Set port's current logical MAC address. */ + OutAddr = (SK_U16 *) &pAPort->Exact[0].a[0]; + GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_2L, OutAddr); + +#ifdef DEBUG + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrGmacMcUpdate: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAPort->Exact[0].a[0], + pAPort->Exact[0].a[1], + pAPort->Exact[0].a[2], + pAPort->Exact[0].a[3], + pAPort->Exact[0].a[4], + pAPort->Exact[0].a[5])) + + SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrGmacMcUpdate: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n", + pAPort->CurrentMacAddress.a[0], + pAPort->CurrentMacAddress.a[1], + pAPort->CurrentMacAddress.a[2], + pAPort->CurrentMacAddress.a[3], + pAPort->CurrentMacAddress.a[4], + pAPort->CurrentMacAddress.a[5])) +#endif /* DEBUG */ + + /* Determine return value. */ + if (Inexact == 0 && pAPort->PromMode == 0) { + return (SK_MC_FILTERING_EXACT); + } + else { + return (SK_MC_FILTERING_INEXACT); + } + +} /* SkAddrGmacMcUpdate */ /****************************************************************************** @@ -863,7 +1347,7 @@ NetNumber = pAC->Rlmt.Port[PortNumber].Net->NetNumber; - if (PortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } @@ -877,7 +1361,7 @@ if (Flags & SK_ADDR_SET_LOGICAL) { /* Activate logical MAC address. */ /* Parameter *pNewAddr is ignored. */ - for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { if (!pAC->Addr.Port[i].CurrentMacAddressSet) { return (SK_ADDR_TOO_EARLY); } @@ -891,12 +1375,12 @@ pAC->Addr.Net[NetNumber].CurrentMacAddress; /* Write address to first exact match entry of active port. */ - (void)SkAddrMcUpdate(pAC, IoC, PortNumber); + (void) SkAddrMcUpdate(pAC, IoC, PortNumber); } else if (Flags & SK_ADDR_CLEAR_LOGICAL) { /* Deactivate logical MAC address. */ /* Parameter *pNewAddr is ignored. */ - for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { if (!pAC->Addr.Port[i].CurrentMacAddressSet) { return (SK_ADDR_TOO_EARLY); } @@ -911,7 +1395,7 @@ } /* Write address to first exact match entry of active port. */ - (void)SkAddrMcUpdate(pAC, IoC, PortNumber); + (void) SkAddrMcUpdate(pAC, IoC, PortNumber); } else if (Flags & SK_ADDR_PHYSICAL_ADDRESS) { /* Physical MAC address. */ if (SK_ADDR_EQUAL(pNewAddr->a, @@ -919,7 +1403,7 @@ return (SK_ADDR_DUPLICATE_ADDRESS); } - for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { if (!pAC->Addr.Port[i].CurrentMacAddressSet) { return (SK_ADDR_TOO_EARLY); } @@ -939,9 +1423,15 @@ pAC->Addr.Port[PortNumber].CurrentMacAddress; pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr; - /* Change port's address. */ - OutAddr = (SK_U16 *)pNewAddr; - XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr); + /* Change port's physical MAC address. 
*/ + OutAddr = (SK_U16 *) pNewAddr; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr); + } + else { + GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr); + } /* Report address change to RLMT. */ Para.Para32[0] = PortNumber; @@ -954,7 +1444,7 @@ return (SK_ADDR_SUCCESS); } - for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { + for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) { if (!pAC->Addr.Port[i].CurrentMacAddressSet) { return (SK_ADDR_TOO_EARLY); } @@ -971,24 +1461,18 @@ pAC->Addr.Net[NetNumber].CurrentMacAddress = *pNewAddr; pAC->Addr.Port[PortNumber].Exact[0] = *pNewAddr; - #ifdef DEBUG - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_CTRL, - ("Permanent MAC Address: %02X %02X %02X %02X %02X %02X\n", + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrOverride: Permanent MAC Address: %02X %02X %02X %02X %02X %02X\n", pAC->Addr.Net[NetNumber].PermanentMacAddress.a[0], pAC->Addr.Net[NetNumber].PermanentMacAddress.a[1], pAC->Addr.Net[NetNumber].PermanentMacAddress.a[2], pAC->Addr.Net[NetNumber].PermanentMacAddress.a[3], pAC->Addr.Net[NetNumber].PermanentMacAddress.a[4], pAC->Addr.Net[NetNumber].PermanentMacAddress.a[5])) - SK_DBG_MSG( - pAC, - SK_DBGMOD_ADDR, - SK_DBGCAT_CTRL, - ("New logical MAC Address: %02X %02X %02X %02X %02X %02X\n", + + SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL, + ("SkAddrOverride: New logical MAC Address: %02X %02X %02X %02X %02X %02X\n", pAC->Addr.Net[NetNumber].CurrentMacAddress.a[0], pAC->Addr.Net[NetNumber].CurrentMacAddress.a[1], pAC->Addr.Net[NetNumber].CurrentMacAddress.a[2], @@ -998,10 +1482,11 @@ #endif /* DEBUG */ /* Write address to first exact match entry of active port. */ - (void)SkAddrMcUpdate(pAC, IoC, PortNumber); + (void) SkAddrMcUpdate(pAC, IoC, PortNumber); } return (SK_ADDR_SUCCESS); + } /* SkAddrOverride */ @@ -1015,6 +1500,10 @@ * - all LLC frames * - all MC frames * + * It calls either SkAddrXmacPromiscuousChange or + * SkAddrGmacPromiscuousChange, according to the adapter in use. + * The real work is done there. + * * Context: * runtime, pageable * may be called after SK_INIT_IO @@ -1029,6 +1518,48 @@ SK_U32 PortNumber, /* port whose promiscuous mode changes */ int NewPromMode) /* new promiscuous mode */ { + int ReturnCode; + + if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { + return (SK_ADDR_ILLEGAL_PORT); + } + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + ReturnCode = SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); + } + else { + ReturnCode = SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode); + } + + return (ReturnCode); + +} /* SkAddrPromiscuousChange */ + + +/****************************************************************************** + * + * SkAddrXmacPromiscuousChange - set promiscuous mode for given port + * + * Description: + * This routine manages promiscuous mode: + * - none + * - all LLC frames + * - all MC frames + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrXmacPromiscuousChange( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* port whose promiscuous mode changes */ +int NewPromMode) /* new promiscuous mode */ +{ int i; SK_BOOL InexactModeBit; SK_U8 Inexact; @@ -1037,14 +1568,11 @@ SK_U16 LoMode; /* Lower 16 bits of XMAC Mode Register. 
*/ int CurPromMode = SK_PROM_MODE_NONE; - if (PortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { - return (SK_ADDR_ILLEGAL_PORT); - } - /* Read CurPromMode from Hardware. */ XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - if (LoMode & XM_MD_ENA_PROM) { + if ((LoMode & XM_MD_ENA_PROM) != 0) { + /* Promiscuous mode! */ CurPromMode |= SK_PROM_MODE_LLC; } @@ -1055,12 +1583,12 @@ CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC); } else { - /* Read InexactModeBit (bit 15 in mode register). */ + /* Get InexactModeBit (bit XM_MD_ENA_HASH in mode register) */ XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - InexactModeBit = (LoMode & XM_MD_ENA_HSH) != 0; + InexactModeBit = (LoMode & XM_MD_ENA_HASH) != 0; - /* Read 64-bit hash register from HW. */ + /* Read 64-bit hash register from XMAC */ XM_INHASH(IoC, PortNumber, XM_HSM, &HwInexactFilter.Bytes[0]); for (HwInexact = 0xFF, i = 0; i < 8; i++) { @@ -1080,13 +1608,12 @@ if ((NewPromMode & SK_PROM_MODE_ALL_MC) && !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC. */ + /* Set all bits in 64-bit hash register. */ XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash); - /* Set bit 15 in mode register. */ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - LoMode |= XM_MD_ENA_HSH; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + /* Enable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); } else if ((CurPromMode & SK_PROM_MODE_ALL_MC) && !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm MC. */ @@ -1094,55 +1621,118 @@ Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i]; } if (Inexact == 0) { - /* Clear bit 15 in mode register. */ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - LoMode &= ~XM_MD_ENA_HSH; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + /* Disable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_FALSE); } else { /* Set 64-bit hash register to InexactFilter. */ - XM_OUTHASH( - IoC, - PortNumber, - XM_HSM, + XM_OUTHASH(IoC, PortNumber, XM_HSM, &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]); - /* Set bit 15 in mode register. */ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - LoMode |= XM_MD_ENA_HSH; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + /* Enable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); } } if ((NewPromMode & SK_PROM_MODE_LLC) && !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */ - /* Set promiscuous bit in mode register. */ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); - -#if 0 - /* Receive MAC frames. */ - LoMode |= XM_MD_RX_MCTRL; -#endif /* 0 */ - - LoMode |= XM_MD_ENA_PROM; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + /* Set the MAC in Promiscuous Mode */ + SkMacPromiscMode(pAC, IoC, PortNumber, SK_TRUE); } else if ((CurPromMode & SK_PROM_MODE_LLC) && !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC. */ - /* Clear promiscuous bit in mode register. 
*/ - XM_IN16(IoC, PortNumber, XM_MODE, &LoMode); + /* Clear Promiscuous Mode */ + SkMacPromiscMode(pAC, IoC, PortNumber, SK_FALSE); + } + + return (SK_ADDR_SUCCESS); + +} /* SkAddrXmacPromiscuousChange */ + + +/****************************************************************************** + * + * SkAddrGmacPromiscuousChange - set promiscuous mode for given port + * + * Description: + * This routine manages promiscuous mode: + * - none + * - all LLC frames + * - all MC frames + * + * Context: + * runtime, pageable + * may be called after SK_INIT_IO + * + * Returns: + * SK_ADDR_SUCCESS + * SK_ADDR_ILLEGAL_PORT + */ +int SkAddrGmacPromiscuousChange( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* I/O context */ +SK_U32 PortNumber, /* port whose promiscuous mode changes */ +int NewPromMode) /* new promiscuous mode */ +{ + SK_U16 ReceiveControl; /* GMAC Receive Control Register */ + int CurPromMode = SK_PROM_MODE_NONE; + + /* Read CurPromMode from Hardware. */ + GM_IN16(IoC, PortNumber, GM_RX_CTRL, &ReceiveControl); + + if ((ReceiveControl & (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)) == 0) { + /* Promiscuous mode! */ + CurPromMode |= SK_PROM_MODE_LLC; + } + + if ((ReceiveControl & GM_RXCR_MCF_ENA) == 0) { + /* All Multicast mode! */ + CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC); + } -#if 0 - /* Don't receive MAC frames. */ - LoMode &= ~XM_MD_RX_MCTRL; -#endif /* 0 */ + pAC->Addr.Port[PortNumber].PromMode = NewPromMode; + + if (NewPromMode == CurPromMode) { + return (SK_ADDR_SUCCESS); + } + + if ((NewPromMode & SK_PROM_MODE_ALL_MC) && + !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC */ - LoMode &= ~XM_MD_ENA_PROM; - XM_OUT16(IoC, PortNumber, XM_MODE, LoMode); + /* Set all bits in 64-bit hash register. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash); + + /* Enable Hashing */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); } + if ((CurPromMode & SK_PROM_MODE_ALL_MC) && + !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm. MC */ + + /* Set 64-bit hash register to InexactFilter. */ + GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, + &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]); + + /* Enable Hashing. */ + SkMacHashing(pAC, IoC, PortNumber, SK_TRUE); + } + + if ((NewPromMode & SK_PROM_MODE_LLC) && + !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */ + + /* Set the MAC to Promiscuous Mode. */ + SkMacPromiscMode(pAC, IoC, PortNumber, SK_TRUE); + } + else if ((CurPromMode & SK_PROM_MODE_LLC) && + !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC */ + + /* Clear Promiscuous Mode. 
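A decoding sketch, not part of the patch: SkAddrGmacPromiscuousChange() above infers the current mode from the two receive-filter enable bits in GM_RX_CTRL (both clear means fully promiscuous, multicast filtering clear means all-multicast; the driver additionally masks the all-multicast flag with the port's stored PromMode). The GM_RXCR_* values below are from the skgehw.h hunk; the PROM_MODE-style constants and the sample register value are placeholders for this demo.

/* Decoding sketch for the GM_RX_CTRL based mode detection above.
 * The PROM_MODE_* values are placeholders; only the register bits
 * are taken from the patch. */
#include <stdio.h>

#define GM_RXCR_UCF_ENA (1 << 15)	/* unicast filtering enabled   */
#define GM_RXCR_MCF_ENA (1 << 14)	/* multicast filtering enabled */

#define PROM_MODE_NONE   0		/* placeholder flag values */
#define PROM_MODE_LLC    1
#define PROM_MODE_ALL_MC 2

static int decode(unsigned short rx_ctrl)
{
	int mode = PROM_MODE_NONE;

	/* Both filters off -> the MAC passes every frame up. */
	if ((rx_ctrl & (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)) == 0)
		mode |= PROM_MODE_LLC;

	/* Multicast filter off -> all multicast frames are accepted. */
	if ((rx_ctrl & GM_RXCR_MCF_ENA) == 0)
		mode |= PROM_MODE_ALL_MC;

	return mode;
}

int main(void)
{
	unsigned short sample = GM_RXCR_UCF_ENA;	/* invented register value */
	printf("mode flags: 0x%x\n", decode(sample));
	return 0;
}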
*/ + SkMacPromiscMode(pAC, IoC, PortNumber, SK_FALSE); + } + return (SK_ADDR_SUCCESS); -} /* SkAddrPromiscuousChange */ + +} /* SkAddrGmacPromiscuousChange */ /****************************************************************************** @@ -1163,7 +1753,7 @@ int SkAddrSwap( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* I/O context */ -SK_U32 FromPortNumber, /* Port1 Index */ +SK_U32 FromPortNumber, /* Port1 Index */ SK_U32 ToPortNumber) /* Port2 Index */ { int i; @@ -1171,11 +1761,11 @@ SK_MAC_ADDR MacAddr; SK_U32 DWord; - if (FromPortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { + if (FromPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } - if (ToPortNumber >= (SK_U32)pAC->GIni.GIMacsFound) { + if (ToPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) { return (SK_ADDR_ILLEGAL_PORT); } @@ -1184,13 +1774,15 @@ } /* - * Swap - * - Exact Match Entries - * - FirstExactMatchRlmt; - * - NextExactMatchRlmt; - * - FirstExactMatchDrv; - * - NextExactMatchDrv; - * - 64-bit filter + * Swap: + * - Exact Match Entries (GEnesis and Yukon) + * Yukon uses first entry for the logical MAC + * address (stored in the second GMAC register). + * - FirstExactMatchRlmt (GEnesis only) + * - NextExactMatchRlmt (GEnesis only) + * - FirstExactMatchDrv (GEnesis only) + * - NextExactMatchDrv (GEnesis only) + * - 64-bit filter (InexactFilter) * - Promiscuous Mode * of ports. */ @@ -1208,46 +1800,49 @@ pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i]; pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i] = Byte; } - + i = pAC->Addr.Port[FromPortNumber].PromMode; pAC->Addr.Port[FromPortNumber].PromMode = pAC->Addr.Port[ToPortNumber].PromMode; pAC->Addr.Port[ToPortNumber].PromMode = i; - - DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt; - pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt = - pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt; - pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt = DWord; - - DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt; - pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt = - pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt; - pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt = DWord; - - DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv; - pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv = - pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv; - pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv = DWord; - - DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchDrv; - pAC->Addr.Port[FromPortNumber].NextExactMatchDrv = - pAC->Addr.Port[ToPortNumber].NextExactMatchDrv; - pAC->Addr.Port[ToPortNumber].NextExactMatchDrv = DWord; - + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt; + pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt = + pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt; + pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt = DWord; + + DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt; + pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt = + pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt; + pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt = DWord; + + DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv; + pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv = + pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv; + pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv = DWord; + + DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchDrv; + pAC->Addr.Port[FromPortNumber].NextExactMatchDrv = + pAC->Addr.Port[ToPortNumber].NextExactMatchDrv; + 
pAC->Addr.Port[ToPortNumber].NextExactMatchDrv = DWord; + } + /* CAUTION: Solution works if only ports of one adapter are in use. */ - for (i = 0; (SK_U32)i < pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber]. + for (i = 0; (SK_U32) i < pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber]. Net->NetNumber].NumPorts; i++) { if (pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber]. Port[i]->PortNumber == ToPortNumber) { pAC->Addr.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber]. ActivePort = i; /* 20001207 RA: Was "ToPortNumber;". */ - } } - (void)SkAddrMcUpdate(pAC, IoC, FromPortNumber); - (void)SkAddrMcUpdate(pAC, IoC, ToPortNumber); + + (void) SkAddrMcUpdate(pAC, IoC, FromPortNumber); + (void) SkAddrMcUpdate(pAC, IoC, ToPortNumber); return (SK_ADDR_SUCCESS); + } /* SkAddrSwap */ #ifdef __cplusplus diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skcsum.c linux.21pre4-ac2/drivers/net/sk98lin/skcsum.c --- linux.21pre4/drivers/net/sk98lin/skcsum.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skcsum.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skcsum.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.8 $ - * Date: $Date: 2001/02/06 11:15:36 $ + * Version: $Revision: 1.10 $ + * Date: $Date: 2002/04/11 10:02:04 $ * Purpose: Store/verify Internet checksum in send/receive packets. * ******************************************************************************/ @@ -26,6 +26,15 @@ * History: * * $Log: skcsum.c,v $ + * Revision 1.10 2002/04/11 10:02:04 rwahl + * Fix in SkCsGetSendInfo(): + * - function did not return ProtocolFlags in every case. + * - pseudo header csum calculated wrong for big endian. + * + * Revision 1.9 2001/06/13 07:42:08 gklug + * fix: NetNumber was wrong in CLEAR_STAT event + * add: check for good NetNumber in Clear STAT + * * Revision 1.8 2001/02/06 11:15:36 rassmann * Supporting two nets on dual-port adapters. * @@ -65,7 +74,7 @@ #ifndef lint static const char SysKonnectFileId[] = "@(#)" - "$Id: skcsum.c,v 1.8 2001/02/06 11:15:36 rassmann Exp $" + "$Id: skcsum.c,v 1.10 2002/04/11 10:02:04 rwahl Exp $" " (C) SysKonnect."; #endif /* !lint */ @@ -195,7 +204,7 @@ * zero.) * * Note: - * There is a bug in the ASIC whic may lead to wrong checksums. + * There is a bug in the ASIC which may lead to wrong checksums. * * Arguments: * pAc - A pointer to the adapter context struct. @@ -411,9 +420,9 @@ SKCS_OFS_IP_DESTINATION_ADDRESS + 0) + (unsigned long) *(SK_U16 *) SKCS_IDX(pIpHeader, SKCS_OFS_IP_DESTINATION_ADDRESS + 2) + - (unsigned long) (NextLevelProtocol << 8) + + (unsigned long) SKCS_HTON16(NextLevelProtocol) + (unsigned long) SKCS_HTON16(IpDataLength); - + /* Add-in any carries. */ SKCS_OC_ADD(PseudoHeaderChecksum, PseudoHeaderChecksum, 0); @@ -422,6 +431,7 @@ SKCS_OC_ADD(pPacketInfo->PseudoHeaderChecksum, PseudoHeaderChecksum, 0); + pPacketInfo->ProtocolFlags = ProtocolFlags; NextLevelProtoStats->TxOkCts++; /* Success. */ } /* SkCsGetSendInfo */ @@ -889,11 +899,13 @@ */ case SK_CSUM_EVENT_CLEAR_PROTO_STATS: - ProtoIndex = (int)Param.Para32[0]; - NetNumber = (int)Param.Para32[1]; + ProtoIndex = (int)Param.Para32[1]; + NetNumber = (int)Param.Para32[0]; if (ProtoIndex < 0) { /* Clear for all protocols. */ - memset(&pAc->Csum.ProtoStats[NetNumber][0], 0, - sizeof(pAc->Csum.ProtoStats[NetNumber])); + if (NetNumber >= 0) { + memset(&pAc->Csum.ProtoStats[NetNumber][0], 0, + sizeof(pAc->Csum.ProtoStats[NetNumber])); + } } else { /* Clear for individual protocol. 
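A stand-alone sketch, not part of the patch: the skcsum.c hunk above replaces NextLevelProtocol << 8 with SKCS_HTON16(NextLevelProtocol), so every term of the pseudo-header sum (source and destination IP words, protocol, IP data length) is a network-order 16-bit quantity, which is what makes the sum correct on big-endian hosts as well. The snippet below computes such a pseudo-header sum with carry folding over invented sample values; it mirrors the terms summed in SkCsGetSendInfo(), not the driver's data structures.

/* Stand-alone pseudo-header sum sketch; sample addresses and length
 * are invented. */
#include <stdio.h>

/* One's-complement sum over a buffer laid out in network byte order. */
static unsigned long oc_sum(const unsigned char *p, int len)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((unsigned long)p[i] << 8) | p[i + 1];
	while (sum >> 16)		/* fold carries, like SKCS_OC_ADD */
		sum = (sum & 0xFFFF) + (sum >> 16);
	return sum;
}

int main(void)
{
	/* src 192.168.0.1, dst 192.168.0.2, zero+proto 6 (TCP), TCP length 20 */
	unsigned char pseudo[12] = {
		192, 168, 0, 1,
		192, 168, 0, 2,
		0, 6,
		0, 20
	};

	printf("pseudo-header sum: 0x%04lx\n", oc_sum(pseudo, sizeof(pseudo)));
	return 0;
}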
*/ memset(&pAc->Csum.ProtoStats[NetNumber][ProtoIndex], 0, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skge.c linux.21pre4-ac2/drivers/net/sk98lin/skge.c --- linux.21pre4/drivers/net/sk98lin/skge.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skge.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,18 +2,20 @@ * * Name: skge.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.29.2.6 $ - * Date: $Date: 2001/05/21 07:59:29 $ + * Version: $Revision: 1.43 $ + * Date: $Date: 2002/11/29 08:42:41 $ * Purpose: The main driver source module * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * Driver for SysKonnect Gigabit Ethernet Server Adapters: * + * SK-9871 (single link 1000Base-ZX) + * SK-9872 (dual link 1000Base-ZX) * SK-9861 (single link 1000Base-SX, VF45 Volition Plug) * SK-9862 (dual link 1000Base-SX, VF45 Volition Plug) * SK-9841 (single link 1000Base-LX) @@ -22,6 +24,12 @@ * SK-9844 (dual link 1000Base-SX) * SK-9821 (single link 1000Base-T) * SK-9822 (dual link 1000Base-T) + * SK-9881 (single link 1000Base-SX V2 LC) + * SK-9871 (single link 1000Base-ZX V2) + * SK-9861 (single link 1000Base-SX V2, VF45 Volition Plug) + * SK-9841 (single link 1000Base-LX V2) + * SK-9843 (single link 1000Base-SX V2) + * SK-9821 (single link 1000Base-T V2) * * Created 10-Feb-1999, based on Linux' acenic.c, 3c59x.c and * SysKonnects GEnesis Solaris driver @@ -48,6 +56,62 @@ * History: * * $Log: skge.c,v $ + * Revision 1.43 2002/11/29 08:42:41 mlindner + * Fix: Boot message + * + * Revision 1.42 2002/11/28 13:30:23 mlindner + * Add: New frame check + * + * Revision 1.41 2002/11/27 13:55:18 mlindner + * Fix: Drop wrong csum packets + * Fix: Initialize proc_entry after hw check + * + * Revision 1.40 2002/10/31 07:50:37 tschilli + * Function SkGeInitAssignRamToQueues() from common module inserted. + * Autonegotiation is set to ON for all adapters. + * LinkSpeedUsed is used in link up status report. + * Role parameter will show up for 1000 Mbps links only. + * GetConfiguration() inserted after init level 1 in SkGeChangeMtu(). + * All return values of SkGeInit() and SkGeInitPort() are checked. + * + * Revision 1.39 2002/10/02 12:56:05 mlindner + * Add: Support for Yukon + * Add: Support for ZEROCOPY, scatter-gather and hw checksum + * Add: New transmit ring function (use SG and TCP/UDP hardware checksumming) + * Add: New init function + * Add: Speed check and setup + * Add: Merge source for kernel 2.2.x and 2.4.x + * Add: Opcode check for tcp + * Add: Frame length check + * Fix: Transmit complete interrupt + * Fix: Interrupt moderation + * + * Revision 1.29.2.13 2002/01/14 12:44:52 mlindner + * Fix: Rlmt modes + * + * Revision 1.29.2.12 2001/12/07 12:06:18 mlindner + * Fix: malloc -> slab changes + * + * Revision 1.29.2.11 2001/12/06 15:19:20 mlindner + * Add: DMA attributes + * Fix: Module initialisation + * Fix: pci_map_single and pci_unmap_single replaced + * + * Revision 1.29.2.10 2001/12/06 09:56:50 mlindner + * Corrected some printk's + * + * Revision 1.29.2.9 2001/09/05 12:15:34 mlindner + * Add: LBFO Changes + * Fix: Counter Errors (Jumbo == to long errors) + * Fix: Changed pAC->PciDev declaration + * Fix: too short counters + * + * Revision 1.29.2.8 2001/06/25 12:10:44 mlindner + * fix: ReceiveIrq() changed. 
+ * + * Revision 1.29.2.7 2001/06/25 08:07:05 mlindner + * fix: RLMT locking in ReceiveIrq() changed. + * * Revision 1.29.2.6 2001/05/21 07:59:29 mlindner * fix: MTU init problems * @@ -266,24 +330,26 @@ ******************************************************************************/ #include "h/skversion.h" - #include #include #include - #include "h/skdrv1st.h" #include "h/skdrv2nd.h" + /* defines ******************************************************************/ /* for debuging on x86 only */ /* #define BREAKPOINT() asm(" int $3"); */ +/* use the scatter-gather functionality with sendfile() */ +#define SK_ZEROCOPY + /* use of a transmit complete interrupt */ #define USE_TX_COMPLETE /* use interrupt moderation (for tx complete only) */ -// #define USE_INT_MOD -#define INTS_PER_SEC 1000 +#define USE_INT_MOD +#define INTS_PER_SEC 1800 /* * threshold for copying small receive frames @@ -294,10 +360,13 @@ /* number of adapters that can be configured via command line params */ #define SK_MAX_CARD_PARAM 16 + /* * use those defines for a compile-in version of the driver instead * of command line parameters */ +// #define LINK_SPEED_A {"Auto", } +// #define LINK_SPEED_B {"Auto", } // #define AUTO_NEG_A {"Sense", } // #define AUTO_NEG_B {"Sense", } // #define DUP_CAP_A {"Both", } @@ -314,10 +383,8 @@ #define DEV_KFREE_SKB_ANY(skb) dev_kfree_skb_any(skb) /* function prototypes ******************************************************/ -static void FreeResources(struct net_device *dev); -int init_module(void); -void cleanup_module(void); -static int SkGeBoardInit(struct net_device *dev, SK_AC *pAC); +static void FreeResources(struct SK_NET_DEVICE *dev); +static int SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC); static SK_BOOL BoardAllocMem(SK_AC *pAC); static void BoardFreeMem(SK_AC *pAC); static void BoardInitMem(SK_AC *pAC); @@ -326,66 +393,62 @@ static void SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs); static void SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs); -static int SkGeOpen(struct net_device *dev); -static int SkGeClose(struct net_device *dev); -static int SkGeXmit(struct sk_buff *skb, struct net_device *dev); -static int SkGeSetMacAddr(struct net_device *dev, void *p); -static void SkGeSetRxMode(struct net_device *dev); -static struct net_device_stats *SkGeStats(struct net_device *dev); -static int SkGeIoctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int SkGeOpen(struct SK_NET_DEVICE *dev); +static int SkGeClose(struct SK_NET_DEVICE *dev); +static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev); +static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p); +static void SkGeSetRxMode(struct SK_NET_DEVICE *dev); +static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev); +static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd); static void GetConfiguration(SK_AC*); static void ProductStr(SK_AC*); static int XmitFrame(SK_AC*, TX_PORT*, struct sk_buff*); static void FreeTxDescriptors(SK_AC*pAC, TX_PORT*); static void FillRxRing(SK_AC*, RX_PORT*); static SK_BOOL FillRxDescriptor(SK_AC*, RX_PORT*); -static void ReceiveIrq(SK_AC*, RX_PORT*); -static void ClearAndStartRx(SK_AC*, int); +static void ReceiveIrq(SK_AC*, RX_PORT*, SK_BOOL); +static void ClearAndStartRx(SK_AC*, int); static void ClearTxIrq(SK_AC*, int, int); static void ClearRxRing(SK_AC*, RX_PORT*); static void ClearTxRing(SK_AC*, TX_PORT*); static void SetQueueSizes(SK_AC *pAC); -static int SkGeChangeMtu(struct net_device *dev, int new_mtu); 
+static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int new_mtu); static void PortReInitBmu(SK_AC*, int); static int SkGeIocMib(DEV_NET*, unsigned int, int); - +static int XmitFrameSG(SK_AC*, TX_PORT*, struct sk_buff*); /*Extern */ -extern struct proc_dir_entry *pSkRootDir; - -//extern struct proc_dir_entry Our_Proc_Dir; -extern int proc_read(char *buffer, char **buffer_location, - off_t offset, int buffer_length, int *eof, void *data); - +/* external Proc function */ +extern int proc_read( + char *buffer, + char **buffer_location, + off_t offset, + int buffer_length, + int *eof, + void *data); #ifdef DEBUG static void DumpMsg(struct sk_buff*, char*); static void DumpData(char*, int); static void DumpLong(char*, int); #endif - +void dump_frag( SK_U8 *data, int length); /* global variables *********************************************************/ static const char *BootString = BOOT_STRING; -struct net_device *sk98lin_root_dev = NULL; +struct SK_NET_DEVICE *SkGeRootDev = NULL; static int probed __initdata = 0; -struct inode_operations SkInodeOps; -//static struct file_operations SkFileOps; /* with open/relase */ /* local variables **********************************************************/ static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}}; static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480}; +/* local variables **********************************************************/ +const char SK_Root_Dir_entry[8]; -void proc_fill_inode(struct inode *inode, int fill) -{ - if (fill) - MOD_INC_USE_COUNT; - else - MOD_DEC_USE_COUNT; -} +static struct proc_dir_entry *pSkRootDir; @@ -405,51 +468,53 @@ static int __init skge_probe (void) { int proc_root_initialized = 0; - int boards_found = 0; - int version_disp = 0; - SK_AC *pAC; - DEV_NET *pNet = NULL; - struct pci_dev *pdev = NULL; - unsigned long base_address; - struct net_device *dev = NULL; + int boards_found = 0; + SK_AC *pAC; + DEV_NET *pNet = NULL; struct proc_dir_entry *pProcFile; + struct pci_dev *pdev = NULL; + unsigned long base_address; + struct SK_NET_DEVICE *dev = NULL; + SK_BOOL DeviceFound = SK_FALSE; + SK_BOOL BootStringCount = SK_FALSE; if (probed) return -ENODEV; probed++; - - /* display driver info */ - if (!version_disp) - { - /* set display flag to TRUE so that */ - /* we only display this string ONCE */ - version_disp = 1; - printk("%s\n", BootString); - } if (!pci_present()) /* is PCI support present? */ return -ENODEV; - while((pdev = pci_find_device(PCI_VENDOR_ID_SYSKONNECT, - PCI_DEVICE_ID_SYSKONNECT_GE, pdev)) != NULL) { + while((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) + { dev = NULL; pNet = NULL; - if (pci_enable_device(pdev)) + if ((pdev->vendor != PCI_VENDOR_ID_SYSKONNECT) && + ((pdev->device != PCI_DEVICE_ID_SYSKONNECT_GE) || + (pdev->device != PCI_DEVICE_ID_SYSKONNECT_YU))){ continue; + } /* Configure DMA attributes. 
*/ if (pci_set_dma_mask(pdev, (u64) 0xffffffffffffffff) && - pci_set_dma_mask(pdev, (u64) 0xffffffff)) - continue; + pci_set_dma_mask(pdev, (u64) 0xffffffff)) + continue; + - if ((dev = init_etherdev(dev, sizeof(DEV_NET))) == 0) { + if ((dev = init_etherdev(dev, sizeof(DEV_NET))) == NULL) { printk(KERN_ERR "Unable to allocate etherdev " "structure!\n"); break; } + if (dev->priv == NULL) { + printk(KERN_ERR "Unable to allocate adapter " + "structure!\n"); + break; + } + pNet = dev->priv; pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL); if (pNet->pAC == NULL){ @@ -459,15 +524,23 @@ break; } + /* Print message */ + if (!BootStringCount) { + /* set display flag to TRUE so that */ + /* we only display this string ONCE */ + BootStringCount = SK_TRUE; + printk("%s\n", BootString); + } + memset(pNet->pAC, 0, sizeof(SK_AC)); pAC = pNet->pAC; - pAC->PciDev = *pdev; + pAC->PciDev = pdev; pAC->PciDevId = pdev->device; pAC->dev[0] = dev; pAC->dev[1] = dev; sprintf(pAC->Name, "SysKonnect SK-98xx"); pAC->CheckQueue = SK_FALSE; - + pNet->Mtu = 1500; pNet->Up = 0; dev->irq = pdev->irq; @@ -480,28 +553,23 @@ dev->set_mac_address = &SkGeSetMacAddr; dev->do_ioctl = &SkGeIoctl; dev->change_mtu = &SkGeChangeMtu; + dev->flags &= ~IFF_RUNNING; - if(!proc_root_initialized) { - pSkRootDir = create_proc_entry("sk98lin", - S_IFDIR | S_IWUSR | S_IRUGO | S_IXUGO, proc_net); - pSkRootDir->owner = THIS_MODULE; - - proc_root_initialized = 1; +#ifdef SK_ZEROCOPY + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + /* Use only if yukon hardware */ + /* SK and ZEROCOPY - fly baby... */ + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; } - - pProcFile = create_proc_entry(dev->name, - S_IFREG | 0444, pSkRootDir); - pProcFile->read_proc = proc_read; - pProcFile->write_proc = NULL; - pProcFile->nlink = 1; - pProcFile->size = sizeof(dev->name+1); - pProcFile->data = (void*)pProcFile; +#endif /* * Dummy value. */ dev->base_addr = 42; pci_set_master(pdev); + + pci_set_master(pdev); base_address = pci_resource_start (pdev, 0); #ifdef SK_BIG_ENDIAN @@ -515,13 +583,13 @@ our2 |= PCI_REV_DESC; SkPciWriteCfgDWord(pAC, PCI_OUR_REG_2, our2); } -#endif /* BIG ENDIAN */ +#endif /* * Remap the regs into kernel space. */ - pAC->IoBase = (char*)ioremap(base_address, 0x4000); + if (!pAC->IoBase){ printk(KERN_ERR "%s: Unable to map I/O register, " "SK 98xx No. %i will be disabled.\n", @@ -529,8 +597,8 @@ kfree(dev); break; } - pAC->Index = boards_found; + pAC->Index = boards_found; if (SkGeBoardInit(dev, pAC)) { FreeResources(dev); kfree(dev); @@ -540,9 +608,46 @@ memcpy((caddr_t) &dev->dev_addr, (caddr_t) &pAC->Addr.Net[0].CurrentMacAddress, 6); + /* First adapter... Create proc and print message */ + if (!DeviceFound) { + DeviceFound = SK_TRUE; + SK_MEMCPY(&SK_Root_Dir_entry, BootString, + sizeof(SK_Root_Dir_entry) - 1); + + /*Create proc (directory)*/ + if(!proc_root_initialized) { + pSkRootDir = create_proc_entry(SK_Root_Dir_entry, + S_IFDIR | S_IWUSR | S_IRUGO | S_IXUGO, proc_net); + proc_root_initialized = 1; + } + + pSkRootDir->owner = THIS_MODULE; + } + + + + /* Create proc file */ + pProcFile = create_proc_entry(dev->name, + S_IFREG | S_IXUSR | S_IWGRP | S_IROTH, + pSkRootDir); + + + pProcFile->read_proc = proc_read; + pProcFile->write_proc = NULL; + pProcFile->nlink = 1; + pProcFile->size = sizeof(dev->name + 1); + pProcFile->data = (void *)pProcFile; + pNet->PortNr = 0; pNet->NetNr = 0; +#ifdef SK_ZEROCOPY + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + /* SG and ZEROCOPY - fly baby... 
*/ + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + } +#endif + boards_found++; /* More then one port found */ @@ -569,24 +674,39 @@ dev->set_mac_address = &SkGeSetMacAddr; dev->do_ioctl = &SkGeIoctl; dev->change_mtu = &SkGeChangeMtu; + dev->flags &= ~IFF_RUNNING; - pProcFile = create_proc_entry(dev->name, - S_IFREG | 0444, pSkRootDir); +#ifdef SK_ZEROCOPY + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + /* SG and ZEROCOPY - fly baby... */ + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + } +#endif + + pProcFile = create_proc_entry(dev->name, + S_IFREG | S_IXUSR | S_IWGRP | S_IROTH, + pSkRootDir); + + pProcFile->read_proc = proc_read; pProcFile->write_proc = NULL; pProcFile->nlink = 1; - pProcFile->size = sizeof(dev->name+1); - pProcFile->data = (void*)pProcFile; + pProcFile->size = sizeof(dev->name + 1); + pProcFile->data = (void *)pProcFile; memcpy((caddr_t) &dev->dev_addr, (caddr_t) &pAC->Addr.Net[1].CurrentMacAddress, 6); printk("%s: %s\n", dev->name, pAC->DeviceStr); printk(" PrefPort:B RlmtMode:Dual Check Link State\n"); - + } + /* Save the hardware revision */ + pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) + + (pAC->GIni.GIPciHwRev & 0x0F); + /* * This is bollocks, but we need to tell the net-init * code that it shall go for the next device. @@ -617,7 +737,7 @@ * Returns: N/A * */ -static void FreeResources(struct net_device *dev) +static void FreeResources(struct SK_NET_DEVICE *dev) { SK_U32 AllocFlag; DEV_NET *pNet; @@ -640,17 +760,19 @@ } /* FreeResources */ -MODULE_AUTHOR("Christoph Goos "); +MODULE_AUTHOR("Mirko Lindner "); MODULE_DESCRIPTION("SysKonnect SK-NET Gigabit Ethernet SK-98xx driver"); MODULE_LICENSE("GPL"); +MODULE_PARM(Speed_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(Speed_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(AutoNeg_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(AutoNeg_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(DupCap_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(DupCap_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(FlowCtrl_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(FlowCtrl_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); -MODULE_PARM(Role_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); -MODULE_PARM(Role_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(Role_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); +MODULE_PARM(Role_B, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(PrefPort, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); MODULE_PARM(RlmtMode, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s"); /* not used, just there because every driver should have them: */ @@ -658,6 +780,18 @@ MODULE_PARM(debug, "i"); +#ifdef LINK_SPEED_A +static char *Speed_A[SK_MAX_CARD_PARAM] = LINK_SPEED; +#else +static char *Speed_A[SK_MAX_CARD_PARAM] = {"", }; +#endif + +#ifdef LINK_SPEED_B +static char *Speed_B[SK_MAX_CARD_PARAM] = LINK_SPEED; +#else +static char *Speed_B[SK_MAX_CARD_PARAM] = {"", }; +#endif + #ifdef AUTO_NEG_A static char *AutoNeg_A[SK_MAX_CARD_PARAM] = AUTO_NEG_A; #else @@ -718,7 +852,6 @@ static char *RlmtMode[SK_MAX_CARD_PARAM] = {"", }; #endif - static int debug = 0; /* not used */ static int options[SK_MAX_CARD_PARAM] = {0, }; /* not used */ @@ -737,7 +870,7 @@ static int __init skge_init_module(void) { int cards; - sk98lin_root_dev = NULL; + SkGeRootDev = NULL; /* just to avoid warnings ... 
*/ debug = 0; @@ -745,7 +878,7 @@ cards = skge_probe(); if (cards == 0) { - printk("No adapter found\n"); + printk("No adapter found.\n"); } return cards ? 0 : -ENODEV; } /* skge_init_module */ @@ -765,16 +898,16 @@ { DEV_NET *pNet; SK_AC *pAC; -struct net_device *next; +struct SK_NET_DEVICE *next; unsigned long Flags; SK_EVPARA EvPara; - while (sk98lin_root_dev) { - pNet = (DEV_NET*) sk98lin_root_dev->priv; + while (SkGeRootDev) { + pNet = (DEV_NET*) SkGeRootDev->priv; pAC = pNet->pAC; next = pAC->Next; - netif_stop_queue(sk98lin_root_dev); + netif_stop_queue(SkGeRootDev); SkGeYellowLED(pAC, pAC->IoBase, 0); if(pAC->BoardLevel == 2) { @@ -806,17 +939,17 @@ kfree(pAC->dev[1]); } - FreeResources(sk98lin_root_dev); + FreeResources(SkGeRootDev); - sk98lin_root_dev->get_stats = NULL; + SkGeRootDev->get_stats = NULL; /* * otherwise unregister_netdev calls get_stats with * invalid IO ... :-( */ - unregister_netdev(sk98lin_root_dev); - kfree(sk98lin_root_dev); + unregister_netdev(SkGeRootDev); + kfree(SkGeRootDev); kfree(pAC); - sk98lin_root_dev = next; + SkGeRootDev = next; } /* clear proc-dir */ @@ -840,13 +973,14 @@ * 0, if everything is ok * !=0, on error */ -static int __init SkGeBoardInit(struct net_device *dev, SK_AC *pAC) +static int __init SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC) { short i; unsigned long Flags; char *DescrString = "sk98lin: Driver for Linux"; /* this is given to PNMI */ char *VerStr = VER_STRING; int Ret; /* return code of request_irq */ +SK_BOOL DualNet; SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("IoBase: %08lX\n", (unsigned long)pAC->IoBase)); @@ -858,7 +992,6 @@ } /* Initialize the mutexes */ - for (i=0; iTxPort[i][0].TxDesRingLock); spin_lock_init(&pAC->RxPort[i].RxDesRingLock); @@ -908,7 +1041,6 @@ pAC->GIni.GIPortUsage = SK_MUL_LINK; } - pAC->BoardLevel = 1; spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); @@ -918,12 +1050,13 @@ Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ, pAC->Name, dev); } else { - printk(KERN_WARNING "%s: illegal number of ports: %d\n", + printk(KERN_WARNING "%s: Illegal number of ports: %d\n", dev->name, pAC->GIni.GIMacsFound); return -EAGAIN; } + if (Ret) { - printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", + printk(KERN_WARNING "%s: Requested IRQ %d is busy.\n", dev->name, dev->irq); return -EAGAIN; } @@ -931,7 +1064,7 @@ /* Alloc memory for this board (Mem for RxD/TxD) : */ if(!BoardAllocMem(pAC)) { - printk("No memory for descriptor rings\n"); + printk("No memory for descriptor rings.\n"); return(-EAGAIN); } @@ -941,8 +1074,24 @@ pAC->CsOfs = (pAC->CsOfs2 << 16) | pAC->CsOfs1; BoardInitMem(pAC); - +#if 0 SetQueueSizes(pAC); +#else + /* tschilling: New common function with minimum size check. */ + DualNet = SK_FALSE; + if (pAC->RlmtNets == 2) { + DualNet = SK_TRUE; + } + + if (SkGeInitAssignRamToQueues( + pAC, + pAC->ActivePort, + DualNet)) { + BoardFreeMem(pAC); + printk("SkGeInitAssignRamToQueues failed.\n"); + return(-EAGAIN); + } +#endif /* Print adapter specific string from vpd */ ProductStr(pAC); @@ -957,14 +1106,13 @@ ((pAC->RlmtMode==7) ? "Check Segmentation" : ((pAC->RlmtMode==17) ? 
"Dual Check Link State" :"Error"))))); - SkGeYellowLED(pAC, pAC->IoBase, 1); /* * Register the device here */ - pAC->Next = sk98lin_root_dev; - sk98lin_root_dev = dev; + pAC->Next = SkGeRootDev; + SkGeRootDev = dev; return (0); } /* SkGeBoardInit */ @@ -1001,17 +1149,19 @@ AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + RX_RING_SIZE + 8; #endif - pDescrMem = pci_alloc_consistent(&pAC->PciDev, AllocLength, + + pDescrMem = pci_alloc_consistent(pAC->PciDev, AllocLength, &pAC->pDescrMemDMA); + if (pDescrMem == NULL) { return (SK_FALSE); } pAC->pDescrMem = pDescrMem; + BusAddr = (unsigned long) pAC->pDescrMemDMA; /* Descriptors need 8 byte alignment, and this is ensured * by pci_alloc_consistent. */ - BusAddr = (unsigned long) pAC->pDescrMemDMA; for (i=0; iGIni.GIMacsFound; i++) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("TX%d/A: pDescrMem: %lX, PhysDescrMem: %lX\n", @@ -1059,7 +1209,8 @@ AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + RX_RING_SIZE + 8; #endif - pci_free_consistent(&pAC->PciDev, AllocLength, + + pci_free_consistent(pAC->PciDev, AllocLength, pAC->pDescrMem, pAC->pDescrMemDMA); pAC->pDescrMem = NULL; } /* BoardFreeMem */ @@ -1146,8 +1297,7 @@ DescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN; DescrNum = TX_RING_SIZE / DescrSize; - } - else { + } else { DescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN; DescrNum = RX_RING_SIZE / DescrSize; @@ -1156,7 +1306,7 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("Descriptor size: %d Descriptor Number: %d\n", DescrSize,DescrNum)); - + pDescr = (RXD*) pMemArea; pPrevDescr = NULL; pNextDescr = (RXD*) (((char*)pDescr) + DescrSize); @@ -1166,7 +1316,8 @@ pDescr->VNextRxd = VNextDescr & 0xffffffffULL; pDescr->pNextRxd = pNextDescr; pDescr->TcpSumStarts = pAC->CsOfs; - /* advance on step */ + + /* advance one step */ pPrevDescr = pDescr; pDescr = pNextDescr; pNextDescr = (RXD*) (((char*)pDescr) + DescrSize); @@ -1240,8 +1391,7 @@ */ static void SkGeIsr(int irq, void *dev_id, struct pt_regs *ptregs) { -struct net_device *dev = (struct net_device *)dev_id; - +struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id; DEV_NET *pNet; SK_AC *pAC; SK_U32 IntSrc; /* interrupts source register contents */ @@ -1269,22 +1419,22 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX1 IRQ\n")); - ReceiveIrq(pAC, &pAC->RxPort[0]); - SK_PNMI_CNT_RX_INTR(pAC,0); + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + SK_PNMI_CNT_RX_INTR(pAC, 0); } if (IntSrc & IRQ_EOF_RX2) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX2 IRQ\n")); - ReceiveIrq(pAC, &pAC->RxPort[1]); - SK_PNMI_CNT_RX_INTR(pAC,1); + ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE); + SK_PNMI_CNT_RX_INTR(pAC, 1); } #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ if (IntSrc & IRQ_EOF_AS_TX1) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX1 IRQ\n")); - SK_PNMI_CNT_TX_INTR(pAC,0); + SK_PNMI_CNT_TX_INTR(pAC, 0); spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); @@ -1293,7 +1443,7 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX2 IRQ\n")); - SK_PNMI_CNT_TX_INTR(pAC,1); + SK_PNMI_CNT_TX_INTR(pAC, 1); spin_lock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); FreeTxDescriptors(pAC, &pAC->TxPort[1][TX_PRIO_LOW]); spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); @@ -1303,7 +1453,7 @@ SK_DBG_MSG(NULL, 
SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX1 IRQ\n")); - SK_PNMI_CNT_TX_INTR(pAC,0); + SK_PNMI_CNT_TX_INTR(pAC, 1); spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH); spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); @@ -1313,14 +1463,14 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX2 IRQ\n")); - SK_PNMI_CNT_TX_INTR(pAC,1); + SK_PNMI_CNT_TX_INTR(pAC, 1); spin_lock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock); FreeTxDescriptors(pAC, 1, TX_PRIO_HIGH); spin_unlock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock); ClearTxIrq(pAC, 1, TX_PRIO_HIGH); } -#endif /* 0 */ -#endif /* USE_TX_COMPLETE */ +#endif +#endif /* do all IO at once */ if (IntSrc & IRQ_EOF_RX1) @@ -1338,11 +1488,12 @@ if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, - ("SPECIAL IRQ\n")); + ("SPECIAL IRQ DP-Cards => %x\n", IntSrc)); pAC->CheckQueue = SK_FALSE; spin_lock(&pAC->SlowPathLock); if (IntSrc & SPECIAL_IRQS) SkGeSirqIsr(pAC, pAC->IoBase, IntSrc); + SkEventDispatcher(pAC, pAC->IoBase); spin_unlock(&pAC->SlowPathLock); } @@ -1351,33 +1502,17 @@ * came in after handling the ring (OUTs may be delayed * in hardware buffers, but are through after IN) */ - // ReceiveIrq(pAC, &pAC->RxPort[pAC->ActivePort]); - ReceiveIrq(pAC, &pAC->RxPort[0]); - ReceiveIrq(pAC, &pAC->RxPort[1]); - + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE); + if (pAC->CheckQueue) { + pAC->CheckQueue = SK_FALSE; + spin_lock(&pAC->SlowPathLock); + SkEventDispatcher(pAC, pAC->IoBase); + spin_unlock(&pAC->SlowPathLock); + } -#if 0 -// #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); - FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); - spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); - - spin_lock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); - FreeTxDescriptors(pAC, &pAC->TxPort[1][TX_PRIO_LOW]); - spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock); - -#if 0 /* only if sync. 
queues used */ - spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); - FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH); - spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); - - spin_lock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock); - FreeTxDescriptors(pAC, 1, TX_PRIO_HIGH); - spin_unlock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock); -#endif /* 0 */ -#endif /* USE_TX_COMPLETE */ /* IRQ is processed - Enable IRQs again*/ SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK); @@ -1401,7 +1536,7 @@ */ static void SkGeIsrOnePort(int irq, void *dev_id, struct pt_regs *ptregs) { -struct net_device *dev = (struct net_device *)dev_id; +struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id; DEV_NET *pNet; SK_AC *pAC; SK_U32 IntSrc; /* interrupts source register contents */ @@ -1416,7 +1551,7 @@ if (IntSrc == 0) { return; } - + while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) { #if 0 /* software irq currently not used */ if (IntSrc & IRQ_SW) { @@ -1429,15 +1564,15 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF RX1 IRQ\n")); - ReceiveIrq(pAC, &pAC->RxPort[0]); - SK_PNMI_CNT_RX_INTR(pAC,0); + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); + SK_PNMI_CNT_RX_INTR(pAC, 0); } #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ if (IntSrc & IRQ_EOF_AS_TX1) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF AS TX1 IRQ\n")); - SK_PNMI_CNT_TX_INTR(pAC,0); + SK_PNMI_CNT_TX_INTR(pAC, 0); spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); @@ -1447,14 +1582,14 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, ("EOF SY TX1 IRQ\n")); - SK_PNMI_CNT_TX_INTR(pAC,1); + SK_PNMI_CNT_TX_INTR(pAC, 0); spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH); spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); ClearTxIrq(pAC, 0, TX_PRIO_HIGH); } -#endif /* 0 */ -#endif /* USE_TX_COMPLETE */ +#endif +#endif /* do all IO at once */ if (IntSrc & IRQ_EOF_RX1) @@ -1468,11 +1603,12 @@ if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) { SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC, - ("SPECIAL IRQ\n")); + ("SPECIAL IRQ SP-Cards => %x\n", IntSrc)); pAC->CheckQueue = SK_FALSE; spin_lock(&pAC->SlowPathLock); if (IntSrc & SPECIAL_IRQS) SkGeSirqIsr(pAC, pAC->IoBase, IntSrc); + SkEventDispatcher(pAC, pAC->IoBase); spin_unlock(&pAC->SlowPathLock); } @@ -1481,21 +1617,7 @@ * came in after handling the ring (OUTs may be delayed * in hardware buffers, but are through after IN) */ - ReceiveIrq(pAC, &pAC->RxPort[0]); - -#if 0 -// #ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */ - spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); - FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]); - spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock); - -#if 0 /* only if sync. 
queues used */ - spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); - FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH); - spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock); - -#endif /* 0 */ -#endif /* USE_TX_COMPLETE */ + ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE); /* IRQ is processed - Enable IRQs again*/ SK_OUT32(pAC->IoBase, B0_IMSK, IRQ_MASK); @@ -1521,13 +1643,13 @@ * != 0 on error */ static int SkGeOpen( -struct net_device *dev) +struct SK_NET_DEVICE *dev) { -DEV_NET *pNet; -SK_AC *pAC; -unsigned long Flags; /* for spin lock */ -int i; -SK_EVPARA EvPara; /* an event parameter union */ + DEV_NET *pNet; + SK_AC *pAC; + unsigned long Flags; /* for spin lock */ + int i; + SK_EVPARA EvPara; /* an event parameter union */ pNet = (DEV_NET*) dev->priv; pAC = pNet->pAC; @@ -1538,7 +1660,7 @@ if (pAC->BoardLevel == 0) { /* level 1 init common modules here */ if (SkGeInit(pAC, pAC->IoBase, 1) != 0) { - printk("%s: HWInit(1) failed\n", pAC->dev[pNet->PortNr]->name); + printk("%s: HWInit (1) failed.\n", pAC->dev[pNet->PortNr]->name); return (-1); } SkI2cInit (pAC, pAC->IoBase, 1); @@ -1551,8 +1673,11 @@ } if (pAC->BoardLevel != 2) { - /* level 2 init modules here */ - SkGeInit (pAC, pAC->IoBase, 2); + /* tschilling: Level 2 init modules here, check return value. */ + if (SkGeInit(pAC, pAC->IoBase, 2) != 0) { + printk("%s: HWInit (2) failed.\n", pAC->dev[pNet->PortNr]->name); + return (-1); + } SkI2cInit (pAC, pAC->IoBase, 2); SkEventInit (pAC, pAC->IoBase, 2); SkPnmiInit (pAC, pAC->IoBase, 2); @@ -1561,6 +1686,7 @@ SkTimerInit (pAC, pAC->IoBase, 2); pAC->BoardLevel = 2; } + for (i=0; iGIni.GIMacsFound; i++) { /* Enable transmit descriptor polling. */ SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE); @@ -1627,18 +1753,17 @@ * error code - on error */ static int SkGeClose( -struct net_device *dev) +struct SK_NET_DEVICE *dev) { -DEV_NET *pNet; -SK_AC *pAC; + DEV_NET *pNet; + SK_AC *pAC; -unsigned long Flags; /* for spin lock */ -int i; -int PortIdx; -SK_EVPARA EvPara; + unsigned long Flags; /* for spin lock */ + int i; + int PortIdx; + SK_EVPARA EvPara; netif_stop_queue(dev); - pNet = (DEV_NET*) dev->priv; pAC = pNet->pAC; @@ -1653,7 +1778,6 @@ /* * Clear multicast table, promiscuous mode .... */ - SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0); SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx, SK_PROM_MODE_NONE); @@ -1688,16 +1812,17 @@ spin_unlock_irqrestore(&pAC->TxPort[pNet->PortNr] [TX_PRIO_LOW].TxDesRingLock, Flags); } + if (pAC->RlmtNets == 1) { /* clear all descriptor rings */ for (i=0; iGIni.GIMacsFound; i++) { - ReceiveIrq(pAC, &pAC->RxPort[i]); + ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE); ClearRxRing(pAC, &pAC->RxPort[i]); ClearTxRing(pAC, &pAC->TxPort[i][TX_PRIO_LOW]); } } else { /* clear port descriptor rings */ - ReceiveIrq(pAC, &pAC->RxPort[pNet->PortNr]); + ReceiveIrq(pAC, &pAC->RxPort[pNet->PortNr], SK_TRUE); ClearRxRing(pAC, &pAC->RxPort[pNet->PortNr]); ClearTxRing(pAC, &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW]); } @@ -1712,6 +1837,7 @@ return (0); } /* SkGeClose */ + /***************************************************************************** * * SkGeXmit - Linux frame transmit function @@ -1727,23 +1853,47 @@ * WARNING: returning 1 in 'tbusy' case caused system crashes (double * allocated skb's) !!! 
*/ -static int SkGeXmit(struct sk_buff *skb, struct net_device *dev) +static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev) { DEV_NET *pNet; SK_AC *pAC; int Rc; /* return code of XmitFrame */ - + pNet = (DEV_NET*) dev->priv; pAC = pNet->pAC; - if (pAC->RlmtNets == 2) - Rc = XmitFrame(pAC, &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW], skb); - else - Rc = XmitFrame(pAC, &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW], skb); + if ((!skb_shinfo(skb)->nr_frags) || + (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) { + /* Don't activate scatter-gather and hardware checksum */ + + if (pAC->RlmtNets == 2) + Rc = XmitFrame( + pAC, + &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW], + skb); + else + Rc = XmitFrame( + pAC, + &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW], + skb); + } else { + /* scatter-gather and hardware TCP checksumming anabled*/ + if (pAC->RlmtNets == 2) + Rc = XmitFrameSG( + pAC, + &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW], + skb); + else + Rc = XmitFrameSG( + pAC, + &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW], + skb); + } /* Transmitter out of resources? */ - if (Rc <= 0) + if (Rc <= 0) { netif_stop_queue(dev); + } /* If not taken, give buffer ownership back to the * queueing layer. @@ -1773,10 +1923,10 @@ * if necessary. * * Returns: - * > 0 - on succes: the number of bytes in the message - * = 0 - on resource shortage: this frame sent or dropped, now - * the ring is full ( -> set tbusy) - * < 0 - on failure: other problems ( -> return failure to upper layers) + * > 0 - on succes: the number of bytes in the message + * = 0 - on resource shortage: this frame sent or dropped, now + * the ring is full ( -> set tbusy) + * < 0 - on failure: other problems ( -> return failure to upper layers) */ static int XmitFrame( SK_AC *pAC, /* pointer to adapter context */ @@ -1792,7 +1942,9 @@ ("X")); spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); - +#ifndef USE_TX_COMPLETE + FreeTxDescriptors(pAC, pTxPort); +#endif if (pTxPort->TxdRingFree == 0) { /* no enough free descriptors in ring at the moment */ FreeTxDescriptors(pAC, pTxPort); @@ -1803,6 +1955,8 @@ SK_DBGCAT_DRV_TX_PROGRESS, ("XmitFrame failed\n")); /* this message can not be sent now */ + /* Because tbusy seems to be set, the message should not be freed here */ + /* It will be used by the scheduler of the ethernet handler */ return (-1); } } @@ -1821,12 +1975,12 @@ #endif /* set up descriptor and CONTROL dword */ - PhysAddr = (SK_U64) pci_map_page(&pAC->PciDev, - virt_to_page(pMessage->data), - ((unsigned long) pMessage->data & - ~PAGE_MASK), - pMessage->len, - PCI_DMA_TODEVICE); + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + virt_to_page(pMessage->data), + ((unsigned long) pMessage->data & + ~PAGE_MASK), + pMessage->len, + PCI_DMA_TODEVICE); pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); pTxd->pMBuf = pMessage; @@ -1847,18 +2001,200 @@ BytesSend = pMessage->len; + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); /* after releasing the lock, the skb may be immidiately freed */ - if (pTxPort->TxdRingFree != 0) { - spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + if (pTxPort->TxdRingFree != 0) return (BytesSend); - } - else { - /* ring full: set tbusy on return */ - spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + else return (0); - } + } /* XmitFrame */ +/***************************************************************************** + * + * XmitFrameSG - fill one socket buffer into the transmit ring + * (use SG and TCP/UDP hardware checksumming) + * + * Description: + * This 
function puts a message into the transmit descriptor ring + * if there is a descriptors left. + * + * Returns: + * > 0 - on succes: the number of bytes in the message + * = 0 - on resource shortage: this frame sent or dropped, now + * the ring is full ( -> set tbusy) + * < 0 - on failure: other problems ( -> return failure to upper layers) + */ +static int XmitFrameSG( +SK_AC *pAC, /* pointer to adapter context */ +TX_PORT *pTxPort, /* pointer to struct of port to send to */ +struct sk_buff *pMessage) /* pointer to send-message */ +{ + + int i; + int BytesSend; + int hlength; + int protocol; + skb_frag_t *sk_frag; + TXD *pTxd; + TXD *pTxdFst; + TXD *pTxdLst; + SK_U64 PhysAddr; + unsigned long Flags; + + spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags); +#ifndef USE_TX_COMPLETE + FreeTxDescriptors(pAC, pTxPort); +#endif + if ((skb_shinfo(pMessage)->nr_frags +1) > pTxPort->TxdRingFree) { + FreeTxDescriptors(pAC, pTxPort); + if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) { + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex); + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_TX_PROGRESS, + ("XmitFrameSG failed - Ring full\n")); + /* this message can not be sent now */ + return(-1); + } + } + + + pTxd = pTxPort->pTxdRingHead; + pTxdFst = pTxd; + pTxdLst = pTxd; + BytesSend = 0; + protocol = 0; + + /* map first fragment (header) */ + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + virt_to_page(pMessage->data), + ((unsigned long) pMessage->data & ~PAGE_MASK), + skb_headlen(pMessage), + PCI_DMA_TODEVICE); + + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); + + /* HW checksum? */ + if (pMessage->ip_summed == CHECKSUM_HW) { + pTxd->TBControl = TX_CTRL_STF | + TX_CTRL_ST_FWD | + skb_headlen(pMessage); + + /* We have to use the opcode for tcp here because the opcode for + udp is not working in the hardware yet (revision 2.0)*/ + protocol = ((SK_U8)pMessage->data[23] & 0xf); + if ((protocol == 17) && (pAC->GIni.GIChipRev != 0)) + pTxd->TBControl |= BMU_UDP_CHECK; + else + pTxd->TBControl |= BMU_TCP_CHECK ; + + hlength = ((SK_U8)pMessage->data[14] & 0xf) * 4; + pTxd->TcpSumOfs = 0; /* PH-Checksum already claculated */ + pTxd->TcpSumSt = 14+hlength+16; + pTxd->TcpSumWr = 14+hlength; + + } else { + pTxd->TBControl = TX_CTRL_CHECK_DEFAULT | + TX_CTRL_SOFTWARE | + TX_CTRL_STF | + skb_headlen(pMessage); + } + + pTxd = pTxd->pNextTxd; + pTxPort->TxdRingFree--; + BytesSend += skb_headlen(pMessage); + + + /* Map SG fragments */ + for (i = 0; i < skb_shinfo(pMessage)->nr_frags; i++) { + sk_frag = &skb_shinfo(pMessage)->frags[i]; + + /* we already have the proper value in entry */ + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + sk_frag->page, + sk_frag->page_offset, + sk_frag->size, + PCI_DMA_TODEVICE); + + pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); + pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32); + pTxd->pMBuf = pMessage; + + /* HW checksum */ + if (pMessage->ip_summed == CHECKSUM_HW) { + pTxd->TBControl = TX_CTRL_OWN_BMU | + TX_CTRL_SOFTWARE | + TX_CTRL_ST_FWD; + + /* We have to use the opcode for tcp here because the opcode for + udp is not working in the hardware yet (revision 2.0)*/ + if ((protocol == 17) && (pAC->GIni.GIChipRev != 0)) + pTxd->TBControl |= BMU_UDP_CHECK ; + else + pTxd->TBControl |= BMU_TCP_CHECK ; + + } else { + pTxd->TBControl = TX_CTRL_CHECK_DEFAULT | + TX_CTRL_SOFTWARE | + TX_CTRL_OWN_BMU; + } + + /* Last fragment */ + if( (i+1) == 
skb_shinfo(pMessage)->nr_frags ) { +#ifdef USE_TX_COMPLETE + pTxd->TBControl |= TX_CTRL_EOF | + TX_CTRL_EOF_IRQ | + sk_frag->size; +#else + pTxd->TBControl |= TX_CTRL_EOF | + sk_frag->size; +#endif + pTxdFst->TBControl |= TX_CTRL_OWN_BMU | + TX_CTRL_SOFTWARE; + + } else { + pTxd->TBControl |= sk_frag->size; + } + pTxdLst = pTxd; + pTxd = pTxd->pNextTxd; + pTxPort->TxdRingFree--; + BytesSend += sk_frag->size; + } + + if ((pTxPort->pTxdRingPrev->TBControl & TX_CTRL_OWN_BMU) == 0) { + /* previous descriptor already done, so give tx start cmd */ + /* StartTx(pAC, pTxPort->HwAddr); */ + SK_OUT8(pTxPort->HwAddr, TX_Q_CTRL, TX_Q_CTRL_START); + } + + pTxPort->pTxdRingPrev = pTxdLst; + pTxPort->pTxdRingHead = pTxd; + + spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); + + if (pTxPort->TxdRingFree > 0) + return (BytesSend); + else + return (0); +} + + +void dump_frag( SK_U8 *data, int length) +{ + int i; + + printk("Length: %d\n", length); + for( i=0; i < length; i++ ) { + printk(" %02x", (SK_U8)*(data + i) ); + if( !((i+1) % 20) ) + printk("\n"); + } + printk("\n\n"); + +} + /***************************************************************************** * @@ -1889,7 +2225,6 @@ pNewTail = pTxPort->pTxdRingTail; pTxd = pNewTail; - /* * loop forever; exits if TX_CTRL_SOFTWARE bit not set in start frame * or TX_CTRL_OWN_BMU bit set in any frame @@ -1918,19 +2253,19 @@ /* release the DMA mapping */ PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32; PhysAddr |= (SK_U64) pTxd->VDataLow; - pci_unmap_page(&pAC->PciDev, PhysAddr, - pTxd->pMBuf->len, - PCI_DMA_TODEVICE); + pci_unmap_page(pAC->PciDev, PhysAddr, + pTxd->pMBuf->len, + PCI_DMA_TODEVICE); + + if (Control & TX_CTRL_EOF) + DEV_KFREE_SKB_ANY(pTxd->pMBuf); /* free message */ - /* free message */ - DEV_KFREE_SKB_ANY(pTxd->pMBuf); pTxPort->TxdRingFree++; pTxd->TBControl &= ~TX_CTRL_SOFTWARE; pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */ } /* while(forever) */ } /* FreeTxDescriptors */ - /***************************************************************************** * * FillRxRing - fill the receive ring with valid descriptors @@ -1999,12 +2334,12 @@ pRxPort->pRxdRingTail = pRxd->pNextRxd; pRxPort->RxdRingFree--; Length = pAC->RxBufSize; - PhysAddr = (SK_U64) pci_map_page(&pAC->PciDev, - virt_to_page(pMsgBlock->data), - ((unsigned long) pMsgBlock->data & - ~PAGE_MASK), - pAC->RxBufSize - 2, - PCI_DMA_FROMDEVICE); + PhysAddr = (SK_U64) pci_map_page(pAC->PciDev, + virt_to_page(pMsgBlock->data), + ((unsigned long) pMsgBlock->data & + ~PAGE_MASK), + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff); pRxd->VDataHigh = (SK_U32) (PhysAddr >> 32); pRxd->pMBuf = pMsgBlock; @@ -2061,28 +2396,32 @@ * Returns: N/A */ static void ReceiveIrq( -SK_AC *pAC, /* pointer to adapter context */ -RX_PORT *pRxPort) /* pointer to receive port struct */ -{ -RXD *pRxd; /* pointer to receive descriptors */ -SK_U32 Control; /* control field of descriptor */ -struct sk_buff *pMsg; /* pointer to message holding frame */ -struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */ -int FrameLength; /* total length of received frame */ -SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */ -SK_EVPARA EvPara; /* an event parameter union */ -int PortIndex = pRxPort->PortIndex; + SK_AC *pAC, /* pointer to adapter context */ + RX_PORT *pRxPort, /* pointer to receive port struct */ + SK_BOOL SlowPathLock) /* indicates if SlowPathLock is needed */ +{ +RXD *pRxd; /* pointer to receive descriptors */ 
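[Editor's sketch] ReceiveIrq() now carries a SlowPathLock flag (its declaration list continues below). Elsewhere in this patch the interrupt handlers pass SK_TRUE, so the RLMT hand-off inside ReceiveIrq() takes pAC->SlowPathLock itself before queueing the event, while SkGeDrvEvent() passes SK_FALSE because the event dispatcher is already invoked under that lock. A condensed, hypothetical illustration of the call-site convention, assuming the driver headers (h/skdrv1st.h, h/skdrv2nd.h) are included as in skge.c:

/* Hypothetical wrappers only; SK_AC, RX_PORT and ReceiveIrq() are the
 * driver's own symbols as they appear in this diff.
 */
static void rx_from_isr(SK_AC *pAC)
{
	/* interrupt path: SlowPathLock is not held yet, so let ReceiveIrq()
	 * acquire it around SkEventQueue() when a frame goes to RLMT */
	ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
}

static void rx_from_drv_event(SK_AC *pAC, int Port)
{
	/* SkGeDrvEvent() runs with SlowPathLock already held by its caller,
	 * so taking it again here would deadlock; hence SK_FALSE */
	ReceiveIrq(pAC, &pAC->RxPort[Port], SK_FALSE);
}
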
+SK_U32 Control; /* control field of descriptor */ +struct sk_buff *pMsg; /* pointer to message holding frame */ +struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */ +int FrameLength; /* total length of received frame */ +SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */ +SK_EVPARA EvPara; /* an event parameter union */ +unsigned long Flags; /* for spin lock */ +int PortIndex = pRxPort->PortIndex; unsigned int Offset; unsigned int NumBytes; unsigned int ForRlmt; -SK_BOOL IsBc; -SK_BOOL IsMc; -SK_U32 FrameStat; +SK_BOOL IsBc; +SK_BOOL IsMc; +SK_BOOL IsBadFrame; /* Bad frame */ + +SK_U32 FrameStat; unsigned short Csum1; unsigned short Csum2; unsigned short Type; -int Result; -SK_U64 PhysAddr; +int Result; +SK_U64 PhysAddr; rx_start: /* do forever; exit if RX_CTRL_OWN_BMU found */ @@ -2124,33 +2463,75 @@ (RX_CTRL_STF | RX_CTRL_EOF)) { goto rx_failed; } - + /* here we have a complete frame in the ring */ pMsg = pRxd->pMBuf; FrameStat = pRxd->FrameStat; + + /* check for frame length mismatch */ +#define XMR_FS_LEN_SHIFT 18 +#define GMR_FS_LEN_SHIFT 16 + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + if (FrameLength != (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT)) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: Frame length mismatch (%u/%u).\n", + FrameLength, + (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT))); + goto rx_failed; + } + } + else { + if (FrameLength != (SK_U32) (FrameStat >> GMR_FS_LEN_SHIFT)) { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: Frame length mismatch (%u/%u).\n", + FrameLength, + (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT))); + goto rx_failed; + } + } + + /* Set Rx Status */ + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + IsBc = (FrameStat & XMR_FS_BC) != 0; + IsMc = (FrameStat & XMR_FS_MC) != 0; + IsBadFrame = (FrameStat & + (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0; + } else { + IsBc = (FrameStat & GMR_FS_BC) != 0; + IsMc = (FrameStat & GMR_FS_MC) != 0; + IsBadFrame = (((FrameStat & GMR_FS_ANY_ERR) != 0) || + ((FrameStat & GMR_FS_RX_OK) == 0)); + } + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0, ("Received frame of length %d on port %d\n", FrameLength, PortIndex)); SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0, ("Number of free rx descriptors: %d\n", pRxPort->RxdRingFree)); - /*DumpMsg(pMsg, "Rx"); */ - +/* DumpMsg(pMsg, "Rx"); */ + if ((Control & RX_CTRL_STAT_VALID) != RX_CTRL_STAT_VALID || + (IsBadFrame)) { +#if 0 (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0) { +#endif /* there is a receive error in this frame */ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_RX_PROGRESS, ("skge: Error in received frame, dropped!\n" "Control: %x\nRxStat: %x\n", Control, FrameStat)); + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; PhysAddr |= (SK_U64) pRxd->VDataLow; - pci_dma_sync_single(&pAC->PciDev, - (dma_addr_t) PhysAddr, - FrameLength, - PCI_DMA_FROMDEVICE); + pci_dma_sync_single(pAC->PciDev, + (dma_addr_t) PhysAddr, + FrameLength, + PCI_DMA_FROMDEVICE); ReQueueRxBuffer(pAC, pRxPort, pMsg, pRxd->VDataHigh, pRxd->VDataLow); @@ -2165,16 +2546,16 @@ /* * Short frame detected and allocation successfull */ - PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; - PhysAddr |= (SK_U64) pRxd->VDataLow; - /* use new skb and copy data */ skb_reserve(pNewMsg, 2); skb_put(pNewMsg, FrameLength); - pci_dma_sync_single(&pAC->PciDev, - (dma_addr_t) PhysAddr, - FrameLength, - PCI_DMA_FROMDEVICE); + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; + PhysAddr |= (SK_U64) pRxd->VDataLow; + + pci_dma_sync_single(pAC->PciDev, 
+ (dma_addr_t) PhysAddr, + FrameLength, + PCI_DMA_FROMDEVICE); eth_copy_and_sum(pNewMsg, pMsg->data, FrameLength, 0); ReQueueRxBuffer(pAC, pRxPort, pMsg, @@ -2192,10 +2573,10 @@ PhysAddr |= (SK_U64) pRxd->VDataLow; /* release the DMA mapping */ - pci_unmap_page(&pAC->PciDev, - PhysAddr, - pAC->RxBufSize - 2, - PCI_DMA_FROMDEVICE); + pci_unmap_single(pAC->PciDev, + PhysAddr, + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); /* set length in message */ skb_put(pMsg, FrameLength); @@ -2204,11 +2585,13 @@ if (Type == 0x800) { Csum1=le16_to_cpu(pRxd->TcpSums & 0xffff); Csum2=le16_to_cpu((pRxd->TcpSums >> 16) & 0xffff); - if ((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) { + if ((((Csum1 & 0xfffe) && (Csum2 & 0xfffe)) && + (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) || + (pAC->GIni.GIChipId == CHIP_ID_YUKON)) { Result = SkCsGetReceiveInfo(pAC, &pMsg->data[14], Csum1, Csum2, pRxPort->PortIndex); - if (Result == + if (Result == SKCS_STATUS_IP_FRAGMENT || Result == SKCS_STATUS_IP_CSUM_OK || @@ -2216,20 +2599,29 @@ SKCS_STATUS_TCP_CSUM_OK || Result == SKCS_STATUS_UDP_CSUM_OK) { - pMsg->ip_summed = - CHECKSUM_UNNECESSARY; + pMsg->ip_summed = + CHECKSUM_UNNECESSARY; + } else { + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, + SK_DBGCAT_DRV_RX_PROGRESS, + ("skge: CRC error. Frame dropped!\n")); + goto rx_failed; } - } /* checksum calculation valid */ + }/* checksumControl calculation valid */ } /* IP frame */ } /* frame > SK_COPY_TRESHOLD */ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V")); ForRlmt = SK_RLMT_RX_PROTOCOL; +#if 0 IsBc = (FrameStat & XMR_FS_BC)==XMR_FS_BC; +#endif SK_RLMT_PRE_LOOKAHEAD(pAC, PortIndex, FrameLength, IsBc, &Offset, &NumBytes); if (NumBytes != 0) { +#if 0 IsMc = (FrameStat & XMR_FS_MC)==XMR_FS_MC; +#endif SK_RLMT_LOOKAHEAD(pAC, PortIndex, &pMsg->data[Offset], IsBc, IsMc, &ForRlmt); @@ -2276,10 +2668,22 @@ memcpy((char*)(pRlmtMbuf->pData), (char*)(pMsg->data), FrameLength); - SkEventQueue(pAC, SKGE_RLMT, - SK_RLMT_PACKET_RECEIVED, - EvPara); - pAC->CheckQueue = SK_TRUE; + + /* SlowPathLock needed? 
*/ + if (SlowPathLock == SK_TRUE) { + spin_lock_irqsave(&pAC->SlowPathLock, Flags); + SkEventQueue(pAC, SKGE_RLMT, + SK_RLMT_PACKET_RECEIVED, + EvPara); + pAC->CheckQueue = SK_TRUE; + spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); + } else { + SkEventQueue(pAC, SKGE_RLMT, + SK_RLMT_PACKET_RECEIVED, + EvPara); + pAC->CheckQueue = SK_TRUE; + } + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_RX_PROGRESS, ("Q")); @@ -2315,12 +2719,13 @@ ("Schrottdescriptor, length: 0x%x\n", FrameLength)); /* release the DMA mapping */ + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; PhysAddr |= (SK_U64) pRxd->VDataLow; - pci_unmap_page(&pAC->PciDev, - PhysAddr, - pAC->RxBufSize - 2, - PCI_DMA_FROMDEVICE); + pci_unmap_page(pAC->PciDev, + PhysAddr, + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); DEV_KFREE_SKB_IRQ(pRxd->pMBuf); pRxd->pMBuf = NULL; pRxPort->RxdRingFree++; @@ -2386,7 +2791,7 @@ { RXD *pRxd; /* pointer to the current descriptor */ unsigned long Flags; - SK_U64 PhysAddr; +SK_U64 PhysAddr; if (pRxPort->RxdRingFree == pAC->RxDescrPerRing) { return; @@ -2395,12 +2800,13 @@ pRxd = pRxPort->pRxdRingHead; do { if (pRxd->pMBuf != NULL) { + PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32; PhysAddr |= (SK_U64) pRxd->VDataLow; - pci_unmap_page(&pAC->PciDev, - PhysAddr, - pAC->RxBufSize - 2, - PCI_DMA_FROMDEVICE); + pci_unmap_page(pAC->PciDev, + PhysAddr, + pAC->RxBufSize - 2, + PCI_DMA_FROMDEVICE); DEV_KFREE_SKB(pRxd->pMBuf); pRxd->pMBuf = NULL; } @@ -2549,7 +2955,7 @@ * 0, if everything is ok * !=0, on error */ -static int SkGeSetMacAddr(struct net_device *dev, void *p) +static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p) { DEV_NET *pNet = (DEV_NET*) dev->priv; @@ -2557,12 +2963,12 @@ struct sockaddr *addr = p; unsigned long Flags; - + SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("SkGeSetMacAddr starts now...\n")); - if(netif_running(dev)) { + if(netif_running(dev)) return -EBUSY; - } + memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); spin_lock_irqsave(&pAC->SlowPathLock, Flags); @@ -2595,7 +3001,7 @@ * 0, if everything is ok * !=0, on error */ -static void SkGeSetRxMode(struct net_device *dev) +static void SkGeSetRxMode(struct SK_NET_DEVICE *dev) { DEV_NET *pNet; @@ -2649,7 +3055,6 @@ pMcList->dmi_addr[5])); } SkAddrMcUpdate(pAC, pAC->IoBase, PortIdx); - } spin_unlock_irqrestore(&pAC->SlowPathLock, Flags); @@ -2670,7 +3075,7 @@ * 0, if everything is ok * !=0, on error */ -static int SkGeChangeMtu(struct net_device *dev, int NewMtu) +static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int NewMtu) { DEV_NET *pNet; DEV_NET *pOtherNet; @@ -2689,6 +3094,10 @@ return -EINVAL; } + if(pAC->BoardLevel != 2) { + return -EINVAL; + } + pNet->Mtu = NewMtu; pOtherNet = (DEV_NET*)pAC->dev[1 - pNet->NetNr]->priv; if ((pOtherNet->Mtu > 1500) && (NewMtu <= 1500) && (pOtherNet->Up==1)) { @@ -2704,10 +3113,6 @@ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY, ("New MTU: %d\n", NewMtu)); - if(pAC->BoardLevel != 2) { - return 0; - } - /* prevent reconfiguration while changing the MTU */ /* disable interrupts */ @@ -2732,6 +3137,7 @@ spin_lock_irqsave( &pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock, Flags); netif_stop_queue(pAC->dev[i]); + } /* @@ -2756,7 +3162,7 @@ } } else { - /* use normal anoumt of rx buffers */ + /* use normal amount of rx buffers */ for (i=0; iGIni.GIMacsFound; i++) { /* Found more than one port */ if ((pAC->GIni.GIMacsFound == 2 ) && @@ -2780,10 +3186,6 @@ if (NewMtu > 1500) { // pAC->JumboActivated = SK_TRUE; /* is never set back !!! 
*/ pAC->GIni.GIPortUsage = SK_JUMBO_LINK; - for (i=0; iGIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PRxCmd = - XM_RX_STRIP_FCS | XM_RX_LENERR_OK; - } } else { if ((pAC->GIni.GIMacsFound == 2 ) && @@ -2792,9 +3194,6 @@ } else { pAC->GIni.GIPortUsage = SK_RED_LINK; } - for (i=0; iGIni.GIMacsFound; i++) { - pAC->GIni.GP[i].PRxCmd = XM_RX_STRIP_FCS; - } } SkGeInit( pAC, pAC->IoBase, 1); @@ -2805,6 +3204,12 @@ SkRlmtInit( pAC, pAC->IoBase, 1); SkTimerInit(pAC, pAC->IoBase, 1); + /* + * tschilling: + * Speed and others are set back to default in level 1 init! + */ + GetConfiguration(pAC); + SkGeInit( pAC, pAC->IoBase, 2); SkI2cInit( pAC, pAC->IoBase, 2); SkEventInit(pAC, pAC->IoBase, 2); @@ -2817,7 +3222,7 @@ * clear and reinit the rx rings here */ for (i=0; iGIni.GIMacsFound; i++) { - ReceiveIrq(pAC, &pAC->RxPort[i]); + ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE); ClearRxRing(pAC, &pAC->RxPort[i]); FillRxRing(pAC, &pAC->RxPort[i]); @@ -2891,12 +3296,13 @@ * Returns: * pointer to the statistic structure. */ -static struct net_device_stats *SkGeStats(struct net_device *dev) +static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev) { DEV_NET *pNet = (DEV_NET*) dev->priv; SK_AC *pAC = pNet->pAC; SK_PNMI_STRUCT_DATA *pPnmiStruct; /* structure for all Pnmi-Data */ -SK_PNMI_STAT *pPnmiStat; /* pointer to virtual XMAC stat. data */SK_PNMI_CONF *pPnmiConf; /* pointer to virtual link config. */ +SK_PNMI_STAT *pPnmiStat; /* pointer to virtual XMAC stat. data */ +SK_PNMI_CONF *pPnmiConf; /* pointer to virtual link config. */ unsigned int Size; /* size of pnmi struct */ unsigned long Flags; /* for spin lock */ @@ -2915,7 +3321,18 @@ pAC->stats.tx_packets = (SK_U32) pPnmiStat->StatTxOkCts & 0xFFFFFFFF; pAC->stats.rx_bytes = (SK_U32) pPnmiStruct->RxOctetsDeliveredCts; pAC->stats.tx_bytes = (SK_U32) pPnmiStat->StatTxOctetsOkCts; - pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF; + + if (pNet->Mtu <= 1500) { + pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF; + } else { + pAC->stats.rx_errors = (SK_U32) ((pPnmiStruct->InErrorsCts - + pPnmiStat->StatRxTooLongCts) & 0xFFFFFFFF); + } + + + if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC && pAC->HWRevision < 12) + pAC->stats.rx_errors = pAC->stats.rx_errors - pPnmiStat->StatRxShortsCts; + pAC->stats.tx_errors = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF; pAC->stats.rx_dropped = (SK_U32) pPnmiStruct->RxNoBufCts & 0xFFFFFFFF; pAC->stats.tx_dropped = (SK_U32) pPnmiStruct->TxNoBufCts & 0xFFFFFFFF; @@ -2954,7 +3371,7 @@ * 0, if everything is ok * !=0, on error */ -static int SkGeIoctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd) { DEV_NET *pNet; SK_AC *pAC; @@ -3068,6 +3485,7 @@ SK_AC *pAC) /* pointer to the adapter context structure */ { SK_I32 Port; /* preferred port */ +int LinkSpeed; /* Link speed */ int AutoNeg; /* auto negotiation off (0) or on (1) */ int DuplexCap; /* duplex capabilities (0=both, 1=full, 2=half */ int MSMode; /* master / slave mode selection */ @@ -3097,7 +3515,44 @@ #define AN_SENS 2 /* settings for port A */ - AutoNeg = AN_SENS; /* default: do auto Sense */ + /* settings link speed */ + LinkSpeed = SK_LSPEED_AUTO; /* default: do auto select */ + if (Speed_A != NULL && pAC->IndexIndex] != NULL) { + if (strcmp(Speed_A[pAC->Index],"")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } + else if (strcmp(Speed_A[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } + else if (strcmp(Speed_A[pAC->Index],"10")==0) { 
+ LinkSpeed = SK_LSPEED_10MBPS; + } + else if (strcmp(Speed_A[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } + else if (strcmp(Speed_A[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } + else printk("%s: Illegal value for Speed_A\n", + pAC->dev[0]->name); + } + + /* Check speed parameter */ + /* Only copper type adapter and GE V2 cards */ + if (((pAC->GIni.GIChipId != CHIP_ID_YUKON) || + (pAC->GIni.GICopperType != SK_TRUE)) && + ((LinkSpeed != SK_LSPEED_AUTO) && + (LinkSpeed != SK_LSPEED_1000MBPS))) { + printk("%s: Illegal value for Speed_A. " + "Not a copper card or GE V2 card\n Using " + "speed 1000\n", pAC->dev[0]->name); + LinkSpeed = SK_LSPEED_1000MBPS; + } + pAC->GIni.GP[0].PLinkSpeed = LinkSpeed; + + /* Autonegotiation */ + AutoNeg = AN_ON; /* tschilling: Default: Autonegotiation on! */ AutoSet = SK_FALSE; if (AutoNeg_A != NULL && pAC->IndexIndex] != NULL) { @@ -3219,6 +3674,43 @@ /* settings for port B */ + /* settings link speed */ + LinkSpeed = SK_LSPEED_AUTO; /* default: do auto select */ + if (Speed_B != NULL && pAC->IndexIndex] != NULL) { + if (strcmp(Speed_B[pAC->Index],"")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } + else if (strcmp(Speed_B[pAC->Index],"Auto")==0) { + LinkSpeed = SK_LSPEED_AUTO; + } + else if (strcmp(Speed_B[pAC->Index],"10")==0) { + LinkSpeed = SK_LSPEED_10MBPS; + } + else if (strcmp(Speed_B[pAC->Index],"100")==0) { + LinkSpeed = SK_LSPEED_100MBPS; + } + else if (strcmp(Speed_B[pAC->Index],"1000")==0) { + LinkSpeed = SK_LSPEED_1000MBPS; + } + else printk("%s: Illegal value for Speed_B\n", + pAC->dev[1]->name); + } + + /* Check speed parameter */ + /* Only copper type adapter and GE V2 cards */ + if (((pAC->GIni.GIChipId != CHIP_ID_YUKON) || + (pAC->GIni.GICopperType != SK_TRUE)) && + ((LinkSpeed != SK_LSPEED_AUTO) && + (LinkSpeed != SK_LSPEED_1000MBPS))) { + printk("%s: Illegal value for Speed_B. 
" + "Not a copper card or GE V2 card\n Using " + "speed 1000\n", pAC->dev[1]->name); + LinkSpeed = SK_LSPEED_1000MBPS; + } + pAC->GIni.GP[1].PLinkSpeed = LinkSpeed; + + /* Auto negotiation */ AutoNeg = AN_SENS; /* default: do auto Sense */ AutoSet = SK_FALSE; if (AutoNeg_B != NULL && pAC->IndexRlmtNets = 1; - + if (RlmtMode != NULL && pAC->IndexIndex] != NULL) { if (strcmp(RlmtMode[pAC->Index], "") == 0) { @@ -3396,10 +3888,6 @@ else { printk("%s: Illegal value for" " RlmtMode, using default\n", pAC->dev[0]->name); - -printk("MacFound = %d\nRlmtMode = %s", pAC->GIni.GIMacsFound, RlmtMode[pAC->Index]); - - pAC->RlmtMode = 0; } } @@ -3551,7 +4039,7 @@ int PciAddr, /* PCI register address */ SK_U32 *pVal) /* pointer to store the read value */ { - pci_read_config_dword(&pAC->PciDev, PciAddr, pVal); + pci_read_config_dword(pAC->PciDev, PciAddr, pVal); return(0); } /* SkPciReadCfgDWord */ @@ -3573,7 +4061,7 @@ int PciAddr, /* PCI register address */ SK_U16 *pVal) /* pointer to store the read value */ { - pci_read_config_word(&pAC->PciDev, PciAddr, pVal); + pci_read_config_word(pAC->PciDev, PciAddr, pVal); return(0); } /* SkPciReadCfgWord */ @@ -3595,7 +4083,7 @@ int PciAddr, /* PCI register address */ SK_U8 *pVal) /* pointer to store the read value */ { - pci_read_config_byte(&pAC->PciDev, PciAddr, pVal); + pci_read_config_byte(pAC->PciDev, PciAddr, pVal); return(0); } /* SkPciReadCfgByte */ @@ -3617,7 +4105,7 @@ int PciAddr, /* PCI register address */ SK_U32 Val) /* pointer to store the read value */ { - pci_write_config_dword(&pAC->PciDev, PciAddr, Val); + pci_write_config_dword(pAC->PciDev, PciAddr, Val); return(0); } /* SkPciWriteCfgDWord */ @@ -3640,7 +4128,7 @@ int PciAddr, /* PCI register address */ SK_U16 Val) /* pointer to store the read value */ { - pci_write_config_word(&pAC->PciDev, PciAddr, Val); + pci_write_config_word(pAC->PciDev, PciAddr, Val); return(0); } /* SkPciWriteCfgWord */ @@ -3663,7 +4151,7 @@ int PciAddr, /* PCI register address */ SK_U8 Val) /* pointer to store the read value */ { - pci_write_config_byte(&pAC->PciDev, PciAddr, Val); + pci_write_config_byte(pAC->PciDev, PciAddr, Val); return(0); } /* SkPciWriteCfgByte */ @@ -3696,6 +4184,7 @@ SK_EVPARA NewPara; /* parameter for further events */ int Stat; unsigned long Flags; +SK_BOOL DualNet; switch (Event) { case SK_DRV_ADAP_FAIL: @@ -3728,18 +4217,27 @@ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST); + pAC->dev[Param.Para32[0]]->flags &= ~IFF_RUNNING; spin_unlock_irqrestore( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); /* clear rx ring from received frames */ - ReceiveIrq(pAC, &pAC->RxPort[FromPort]); + ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE); ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]); spin_lock_irqsave( &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); - SkGeInitPort(pAC, IoC, FromPort); + + /* tschilling: Handling of return value inserted. 
*/ + if (SkGeInitPort(pAC, IoC, FromPort)) { + if (FromPort == 0) { + printk("%s: SkGeInitPort A failed.\n", pAC->dev[0]->name); + } else { + printk("%s: SkGeInitPort B failed.\n", pAC->dev[1]->name); + } + } SkAddrMcUpdate(pAC,IoC, FromPort); PortReInitBmu(pAC, FromPort); SkGePollTxD(pAC, IoC, FromPort, SK_TRUE); @@ -3755,7 +4253,19 @@ ("NET UP EVENT, Port: %d ", Param.Para32[0])); printk("%s: network connection up using" " port %c\n", pAC->dev[Param.Para32[0]]->name, 'A'+Param.Para32[0]); - printk(" speed: 1000\n"); + + /* tschilling: Values changed according to LinkSpeedUsed. */ + Stat = pAC->GIni.GP[FromPort].PLinkSpeedUsed; + if (Stat == SK_LSPEED_STAT_10MBPS) { + printk(" speed: 10\n"); + } else if (Stat == SK_LSPEED_STAT_100MBPS) { + printk(" speed: 100\n"); + } else if (Stat == SK_LSPEED_STAT_1000MBPS) { + printk(" speed: 1000\n"); + } else { + printk(" speed: unknown\n"); + } + Stat = pAC->GIni.GP[FromPort].PLinkModeStatus; if (Stat == SK_LMODE_STAT_AUTOHALF || Stat == SK_LMODE_STAT_AUTOFULL) { @@ -3784,8 +4294,12 @@ else { printk(" flowctrl: none\n"); } - if (pAC->GIni.GP[FromPort].PhyType != SK_PHY_XMAC) { - Stat = pAC->GIni.GP[FromPort].PMSStatus; + + /* tschilling: Check against CopperType now. */ + if ((pAC->GIni.GICopperType == SK_TRUE) && + (pAC->GIni.GP[FromPort].PLinkSpeedUsed == + SK_LSPEED_STAT_1000MBPS)) { + Stat = pAC->GIni.GP[FromPort].PMSStatus; if (Stat == SK_MS_STAT_MASTER ) { printk(" role: master\n"); } @@ -3796,6 +4310,16 @@ printk(" role: ???\n"); } } + +#ifdef SK_ZEROCOPY + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) + printk(" scatter-gather: enabled\n"); + else + printk(" scatter-gather: disabled\n"); + +#else + printk(" scatter-gather: disabled\n"); +#endif if ((Param.Para32[0] != pAC->ActivePort) && (pAC->RlmtNets == 1)) { @@ -3804,12 +4328,17 @@ SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_INTERN, NewPara); } + + /* Inform the world that link protocol is up. */ + pAC->dev[Param.Para32[0]]->flags |= IFF_RUNNING; + break; case SK_DRV_NET_DOWN: /* SK_U32 Reason */ /* action list 7 */ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, ("NET DOWN EVENT ")); printk("%s: network connection down\n", pAC->dev[Param.Para32[1]]->name); + pAC->dev[Param.Para32[1]]->flags &= ~IFF_RUNNING; break; case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */ SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT, @@ -3841,8 +4370,8 @@ &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, Flags); - ReceiveIrq(pAC, &pAC->RxPort[FromPort]); /* clears rx ring */ - ReceiveIrq(pAC, &pAC->RxPort[ToPort]); /* clears rx ring */ + ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE); /* clears rx ring */ + ReceiveIrq(pAC, &pAC->RxPort[ToPort], SK_FALSE); /* clears rx ring */ ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]); ClearTxRing(pAC, &pAC->TxPort[ToPort][TX_PRIO_LOW]); @@ -3852,13 +4381,37 @@ spin_lock_irqsave( &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); pAC->ActivePort = ToPort; +#if 0 SetQueueSizes(pAC); - SkGeInitPort(pAC, IoC, FromPort); - SkGeInitPort(pAC, IoC, ToPort); +#else + /* tschilling: New common function with minimum size check. 
*/ + DualNet = SK_FALSE; + if (pAC->RlmtNets == 2) { + DualNet = SK_TRUE; + } + + if (SkGeInitAssignRamToQueues( + pAC, + pAC->ActivePort, + DualNet)) { + spin_unlock_irqrestore( + &pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock, Flags); + spin_unlock_irqrestore( + &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock, + Flags); + printk("SkGeInitAssignRamToQueues failed.\n"); + break; + } +#endif + /* tschilling: Handling of return values inserted. */ + if (SkGeInitPort(pAC, IoC, FromPort) || + SkGeInitPort(pAC, IoC, ToPort)) { + printk("%s: SkGeInitPort failed.\n", pAC->dev[0]->name); + } if (Event == SK_DRV_SWITCH_SOFT) { - SkXmRxTxEnable(pAC, IoC, FromPort); + SkMacRxTxEnable(pAC, IoC, FromPort); } - SkXmRxTxEnable(pAC, IoC, ToPort); + SkMacRxTxEnable(pAC, IoC, ToPort); SkAddrSwap(pAC, IoC, FromPort, ToPort); SkAddrMcUpdate(pAC, IoC, FromPort); SkAddrMcUpdate(pAC, IoC, ToPort); @@ -3881,7 +4434,8 @@ pMsg = (struct sk_buff*) pRlmtMbuf->pOs; skb_put(pMsg, pRlmtMbuf->Length); if (XmitFrame(pAC, &pAC->TxPort[pRlmtMbuf->PortIdx][TX_PRIO_LOW], - pMsg) < 0) + pMsg) < 0) + DEV_KFREE_SKB_ANY(pMsg); break; default: @@ -3943,7 +4497,8 @@ } /* SkErrorLog */ -#ifdef DEBUG /***************************************************************/ +#ifdef DEBUG +/****************************************************************************/ /* "debug only" section *****************************************************/ /****************************************************************************/ @@ -4090,7 +4645,7 @@ printk("------------------------\n"); } /* DumpLong */ -#endif /* DEBUG */ +#endif /* * Local variables: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skgehwt.c linux.21pre4-ac2/drivers/net/sk98lin/skgehwt.c --- linux.21pre4/drivers/net/sk98lin/skgehwt.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skgehwt.c 2003-01-06 15:38:18.000000000 +0000 @@ -1,32 +1,23 @@ /****************************************************************************** * * Name: skgehwt.c - * Project: PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.12 $ - * Date: $Date: 1998/10/15 15:11:34 $ + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.13 $ + * Date: $Date: 1999/11/22 13:31:12 $ * Purpose: Hardware Timer. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1989-1998 SysKonnect, + * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. - * All Rights Reserved * - * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF SYSKONNECT - * The copyright notice above does not evidence any - * actual or intended publication of such source code. - * - * This Module contains Proprietary Information of SysKonnect - * and should be treated as Confidential. - * - * The information in this file is provided for the exclusive use of - * the licensees of SysKonnect. - * Such users have the right to use, modify, and incorporate this code - * into products for purposes authorized by the license agreement - * provided they include this notice and the associated copyright notice - * with any such product. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ @@ -36,6 +27,9 @@ * History: * * $Log: skgehwt.c,v $ + * Revision 1.13 1999/11/22 13:31:12 cgoos + * Changed license header to GPL. + * * Revision 1.12 1998/10/15 15:11:34 gklug * fix: ID_sccs to SysKonnectFileId * @@ -83,7 +77,7 @@ Event queue and dispatcher */ static const char SysKonnectFileId[] = - "$Header: /usr56/projects/ge/schedule/skgehwt.c,v 1.12 1998/10/15 15:11:34 gklug Exp $" ; + "$Header: /usr56/projects/ge/schedule/skgehwt.c,v 1.13 1999/11/22 13:31:12 cgoos Exp $" ; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skgeinit.c linux.21pre4-ac2/drivers/net/sk98lin/skgeinit.c --- linux.21pre4/drivers/net/sk98lin/skgeinit.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skgeinit.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skgeinit.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.63 $ - * Date: $Date: 2001/04/05 11:02:09 $ + * Version: $Revision: 1.82 $ + * Date: $Date: 2002/12/05 13:40:21 $ * Purpose: Contains functions to initialize the GE HW * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,108 @@ * History: * * $Log: skgeinit.c,v $ + * Revision 1.82 2002/12/05 13:40:21 rschmidt + * Added setting of Rx GMAC FIFO Flush Mask register. + * Corrected PhyType with new define SK_PHY_MARV_FIBER when + * YUKON Fiber board was found. + * Editorial changes. + * + * Revision 1.81 2002/11/15 12:48:35 rschmidt + * Replaced message SKERR_HWI_E018 with SKERR_HWI_E024 for Rx queue error + * in SkGeStopPort(). + * Added init for pAC->GIni.GIGenesis with SK_FALSE in YUKON-branch. + * Editorial changes. + * + * Revision 1.80 2002/11/12 17:28:30 rschmidt + * Initialized GIPciSlot64 and GIPciClock66 in SkGeInit1(). + * Reduced PCI FIFO watermarks for 32bit/33MHz bus in SkGeInitBmu(). + * Editorial changes. + * + * Revision 1.79 2002/10/21 09:31:02 mkarl + * Changed SkGeInitAssignRamToQueues(), removed call to + * SkGeInitAssignRamToQueues in SkGeInit1 and fixed compiler warning in + * SkGeInit1. + * + * Revision 1.78 2002/10/16 15:55:07 mkarl + * Fixed a bug in SkGeInitAssignRamToQueues. + * + * Revision 1.77 2002/10/14 15:07:22 rschmidt + * Corrected timeout handling for Rx queue in SkGeStopPort() (#10748) + * Editorial changes. + * + * Revision 1.76 2002/10/11 09:24:38 mkarl + * Added check for HW self test results. + * + * Revision 1.75 2002/10/09 16:56:44 mkarl + * Now call SkGeInitAssignRamToQueues() in Init Level 1 in order to assign + * the adapter memory to the queues. This default assignment is not suitable + * for dual net mode. + * + * Revision 1.74 2002/09/12 08:45:06 rwahl + * Set defaults for PMSCap, PLinkSpeed & PLinkSpeedCap dependent on PHY. + * + * Revision 1.73 2002/08/16 15:19:45 rschmidt + * Corrected check for Tx queues in SkGeCheckQSize(). 
+ * Added init for new entry GIGenesis and GICopperType + * Replaced all if(GIChipId == CHIP_ID_GENESIS) with new entry GIGenesis. + * Replaced wrong 1st para pAC with IoC in SK_IN/OUT macros. + * + * Revision 1.72 2002/08/12 13:38:55 rschmidt + * Added check if VAUX is available (stored in GIVauxAvail) + * Initialized PLinkSpeedCap in Port struct with SK_LSPEED_CAP_1000MBPS + * Editorial changes. + * + * Revision 1.71 2002/08/08 16:32:58 rschmidt + * Added check for Tx queues in SkGeCheckQSize(). + * Added start of Time Stamp Timer (YUKON) in SkGeInit2(). + * Editorial changes. + * + * Revision 1.70 2002/07/23 16:04:26 rschmidt + * Added init for GIWolOffs (HW-Bug in YUKON 1st rev.) + * Minor changes + * + * Revision 1.69 2002/07/17 17:07:08 rwahl + * - SkGeInit1(): fixed PHY type debug output; corrected init of GIFunc + * table & GIMacType. + * - Editorial changes. + * + * Revision 1.68 2002/07/15 18:38:31 rwahl + * Added initialization for MAC type dependent function table. + * + * Revision 1.67 2002/07/15 15:45:39 rschmidt + * Added Tx Store & Forward for YUKON (GMAC Tx FIFO is only 1 kB) + * Replaced SK_PHY_MARV by SK_PHY_MARV_COPPER + * Editorial changes + * + * Revision 1.66 2002/06/10 09:35:08 rschmidt + * Replaced C++ comments (//) + * Editorial changes + * + * Revision 1.65 2002/06/05 08:33:37 rschmidt + * Changed GIRamSize and Reset sequence for YUKON. + * SkMacInit() replaced by SkXmInitMac() resp. SkGmInitMac() + * + * Revision 1.64 2002/04/25 13:03:20 rschmidt + * Changes for handling YUKON. + * Removed reference to xmac_ii.h (not necessary). + * Moved all defines into header file. + * Replaced all SkXm...() functions with SkMac...() to handle also + * YUKON's GMAC. + * Added handling for GMAC FIFO in SkGeInitMacFifo(), SkGeStopPort(). + * Removed 'goto'-directive from SkGeCfgSync(), SkGeCheckQSize(). + * Replaced all XMAC-access macros by functions: SkMacRxTxDisable(), + * SkMacFlushTxFifo(). + * Optimized timeout handling in SkGeStopPort(). + * Initialized PLinkSpeed in Port struct with SK_LSPEED_AUTO. + * Release of GMAC Link Control reset in SkGeInit1(). + * Initialized GIChipId and GIChipRev in GE Init structure. + * Added GIRamSize and PhyType values for YUKON. + * Removed use of PRxCmd to setup XMAC. + * Moved setting of XM_RX_DIS_CEXT to SkXmInitMac(). + * Use of SkGeXmitLED() only for GENESIS. + * Changes for V-CPU support. + * Editorial changes. + * * Revision 1.63 2001/04/05 11:02:09 rassmann * Stop Port check of the STOP bit did not take 2/18 sec as wanted. * @@ -139,7 +241,7 @@ * chg: Default is autosensing with AUTOFULL mode * * Revision 1.31 1998/11/25 15:36:16 gklug - * fix: do NOT stop LED Timer when port should be stoped + * fix: do NOT stop LED Timer when port should be stopped * * Revision 1.30 1998/11/24 13:15:28 gklug * add: Init PCkeckPar struct member @@ -150,8 +252,8 @@ * transmit timeouts. * Add TestStopBit() function to handle stop RX/TX * problem with active descriptor poll timers. - * Bug Fix: Descriptor Poll Timer not started, beacuse - * GIPollTimerVal was initilaized with 0. + * Bug Fix: Descriptor Poll Timer not started, because + * GIPollTimerVal was initialized with 0. * * Revision 1.28 1998/11/13 14:24:26 malthoff * Bug Fix: SkGeStopPort() may hang if a Packet Arbiter Timout @@ -181,7 +283,7 @@ * * Revision 1.21 1998/10/20 12:11:56 malthoff * Don't dendy the Queue config if the size of the unused - * rx qeueu is zero. + * Rx qeueu is zero. 
* * Revision 1.20 1998/10/19 07:27:58 malthoff * SkGeInitRamIface() is public to be called by diagnostics. @@ -268,39 +370,21 @@ * * Revision 1.1 1998/07/23 09:48:57 malthoff * Creation. First dummy 'C' file. - * SkGeInit(Level 0) is card_start for ML. - * SkGeDeInit() is card_stop for ML. + * SkGeInit(Level 0) is card_start for GE. + * SkGeDeInit() is card_stop for GE. * * ******************************************************************************/ #include "h/skdrv1st.h" -#include "h/xmac_ii.h" #include "h/skdrv2nd.h" -/* defines ********************************************************************/ - -/* defines for SkGeXmitLed() */ -#define XMIT_LED_INI 0 -#define XMIT_LED_CNT (RX_LED_VAL - RX_LED_INI) -#define XMIT_LED_CTRL (RX_LED_CTRL- RX_LED_INI) -#define XMIT_LED_TST (RX_LED_TST - RX_LED_INI) - -/* Queue Size units */ -#define QZ_UNITS 0x7 - -/* Types of RAM Buffer Queues */ -#define SK_RX_SRAM_Q 1 /* small receive queue */ -#define SK_RX_BRAM_Q 2 /* big receive queue */ -#define SK_TX_RAM_Q 3 /* small or big transmit queue */ - -/* typedefs *******************************************************************/ /* global variables ***********************************************************/ /* local variables ************************************************************/ static const char SysKonnectFileId[] = - "@(#)$Id: skgeinit.c,v 1.63 2001/04/05 11:02:09 rassmann Exp $ (C) SK "; + "@(#)$Id: skgeinit.c,v 1.82 2002/12/05 13:40:21 rschmidt Exp $ (C) SK "; struct s_QOffTab { int RxQOff; /* Receive Queue Address Offset */ @@ -314,11 +398,11 @@ /****************************************************************************** * - * SkGePollRxD() - Enable/Disable Descriptor Polling of RxD Ring + * SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring * * Description: * Enable or disable the descriptor polling the receive descriptor - * ring (RxD) of port 'port'. + * ring (RxD) of port 'Port'. * The new configuration is *not* saved over any SkGeStopPort() and * SkGeInitPort() calls. * @@ -335,22 +419,18 @@ pPrt = &pAC->GIni.GP[Port]; - if (PollRxD) { - SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_ENA_POL); - } - else { - SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_DIS_POL); - } + SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ? + CSR_ENA_POL : CSR_DIS_POL); } /* SkGePollRxD */ /****************************************************************************** * - * SkGePollTxD() - Enable/Disable Descriptor Polling of TxD Rings + * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings * * Description: * Enable or disable the descriptor polling the transmit descriptor - * ring(s) (RxD) of port 'port'. + * ring(s) (TxD) of port 'Port'. * The new configuration is *not* saved over any SkGeStopPort() and * SkGeInitPort() calls. * @@ -368,16 +448,12 @@ pPrt = &pAC->GIni.GP[Port]; - if (PollTxD) { - DWord = CSR_ENA_POL; - } - else { - DWord = CSR_DIS_POL; - } + DWord = (PollTxD) ? 
CSR_ENA_POL : CSR_DIS_POL; if (pPrt->PXSQSize != 0) { SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), DWord); } + if (pPrt->PXAQSize != 0) { SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), DWord); } @@ -397,7 +473,7 @@ * Returns: * nothing */ -void SkGeYellowLED( +void SkGeYellowLED( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int State) /* yellow LED state, 0 = OFF, 0 != ON */ @@ -430,7 +506,7 @@ * Returns: * nothing */ -void SkGeXmitLED( +void SkGeXmitLED( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Led, /* offset to the LED Init Value register */ @@ -475,9 +551,8 @@ * DoCalcAddr() - Calculates the start and the end address of a queue. * * Description: - * This function calculates the start- end the end address - * of a queue. Afterwards the 'StartVal' is incremented to the - * next start position. + * This function calculates the start and the end address of a queue. + * Afterwards the 'StartVal' is incremented to the next start position. * If the port is already initialized the calculated values * will be checked against the configured values and an * error will be returned, if they are not equal. @@ -498,7 +573,7 @@ { SK_U32 EndVal; SK_U32 NextStart; - int Rtv; + int Rtv; Rtv = 0; if (QuSize == 0) { @@ -521,9 +596,125 @@ } *StartVal = NextStart; - return (Rtv); + return(Rtv); } /* DoCalcAddr */ +/****************************************************************************** + * + * SkGeInitAssignRamToQueues() - allocate default queue sizes + * + * Description: + * This function assigns the memory to the different queues and ports. + * When DualNet is set to SK_TRUE all ports get the same amount of memory. + * Otherwise the first port gets most of the memory and all the + * other ports just the required minimum. + * This function can only be called when pAC->GIni.GIRamSize and + * pAC->GIni.GIMacsFound have been initialized, usually this happens + * at init level 1 + * + * Returns: + * 0 - ok + * 1 - invalid input values + * 2 - not enough memory + */ + +int SkGeInitAssignRamToQueues( +SK_AC *pAC, /* Adapter context */ +int ActivePort, /* Active Port in RLMT mode */ +SK_BOOL DualNet) /* adapter context */ +{ + int i; + int UsedKilobytes; /* memory already assigned */ + int ActivePortKilobytes; /* memory available for active port */ + SK_GEPORT *pGePort; + + UsedKilobytes = 0; + + if (ActivePort >= pAC->GIni.GIMacsFound) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("SkGeInitAssignRamToQueues: ActivePort (%d) invalid\n", + ActivePort)); + return(1); + } + if (((pAC->GIni.GIMacsFound * (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE)) + + ((RAM_QUOTA_SYNC == 0) ? 
0 : SK_MIN_TXQ_SIZE)) > pAC->GIni.GIRamSize) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("SkGeInitAssignRamToQueues: Not enough memory (%d)\n", + pAC->GIni.GIRamSize)); + return(2); + } + + + if (DualNet) { + /* every port gets the same amount of memory */ + ActivePortKilobytes = pAC->GIni.GIRamSize / pAC->GIni.GIMacsFound; + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + + pGePort = &pAC->GIni.GP[i]; + + /* take away the minimum memory for active queues */ + ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE); + + /* receive queue gets the minimum + 80% of the rest */ + pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB(( + ActivePortKilobytes * (unsigned long) RAM_QUOTA_RX) / 100)) + + SK_MIN_RXQ_SIZE; + + ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE); + + /* synchronous transmit queue */ + pGePort->PXSQSize = 0; + + /* asynchronous transmit queue */ + pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes + + SK_MIN_TXQ_SIZE); + } + } + else { + /* Rlmt Mode or single link adapter */ + + /* Set standby queue size defaults for all standby ports */ + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + + if (i != ActivePort) { + pGePort = &pAC->GIni.GP[i]; + + pGePort->PRxQSize = SK_MIN_RXQ_SIZE; + pGePort->PXAQSize = SK_MIN_TXQ_SIZE; + pGePort->PXSQSize = 0; + + /* Count used RAM */ + UsedKilobytes += pGePort->PRxQSize + pGePort->PXAQSize; + } + } + /* what's left? */ + ActivePortKilobytes = pAC->GIni.GIRamSize - UsedKilobytes; + + /* assign it to the active port */ + /* first take away the minimum memory */ + ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE); + pGePort = &pAC->GIni.GP[ActivePort]; + + /* receive queue get's the minimum + 80% of the rest */ + pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB((ActivePortKilobytes * + (unsigned long) RAM_QUOTA_RX) / 100)) + SK_MIN_RXQ_SIZE; + + ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE); + + /* synchronous transmit queue */ + pGePort->PXSQSize = 0; + + /* asynchronous transmit queue */ + pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes) + + SK_MIN_TXQ_SIZE; + } +#ifdef VCPU + VCPUprintf(0, "PRxQSize=%u, PXSQSize=%u, PXAQSize=%u\n", + pGePort->PRxQSize, pGePort->PXSQSize, pGePort->PXAQSize); +#endif /* VCPU */ + + return(0); +} /* SkGeInitAssignRamToQueues */ /****************************************************************************** * @@ -531,18 +722,20 @@ * * Description: * This function verifies the Queue Size Configuration specified - * in the variabels PRxQSize, PXSQSize, and PXAQSize of all + * in the variables PRxQSize, PXSQSize, and PXAQSize of all * used ports. * This requirements must be fullfilled to have a valid configuration: * - The size of all queues must not exceed GIRamSize. * - The queue sizes must be specified in units of 8 kB. - * - The size of rx queues of available ports must not be - * smaller than 16kB. + * - The size of Rx queues of available ports must not be + * smaller than 16 kB. + * - The size of at least one Tx queue (synch. or asynch.) + * of available ports must not be smaller than 16 kB + * when Jumbo Frames are used. * - The RAM start and end addresses must not be changed * for ports which are already initialized. - * Furthermore SkGeCheckQSize() defines the Start and End - * Addresses of all ports and stores them into the HWAC port - * structure. + * Furthermore SkGeCheckQSize() defines the Start and End Addresses + * of all ports and stores them into the HWAC port structure. 
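/*
 * Editorial sketch (not part of the patch): the per-port split performed by
 * SkGeInitAssignRamToQueues() above, reduced to its core.  The minimum
 * queue sizes are reserved first, the receive queue then gets RAM_QUOTA_RX
 * percent (80%) of the remainder, and the asynchronous Tx queue takes what
 * is left.  Only macros and constants visible in the hunk above are used
 * (they come from the driver headers); the helper name is illustrative.
 */
static void SketchSplitPortRam(
int PortKilobytes,      /* RAM available for this port */
int *pRxKB,             /* out: receive queue size */
int *pXaKB)             /* out: async transmit queue size */
{
        /* reserve the minimum for the Rx and Tx queues first */
        PortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE);

        /* Rx queue: minimum plus RAM_QUOTA_RX percent of the rest */
        *pRxKB = (int)(ROUND_QUEUE_SIZE_KB((PortKilobytes *
                (unsigned long)RAM_QUOTA_RX) / 100)) + SK_MIN_RXQ_SIZE;

        PortKilobytes -= (*pRxKB - SK_MIN_RXQ_SIZE);

        /* async Tx queue: minimum plus everything that remains */
        *pXaKB = (int)ROUND_QUEUE_SIZE_KB(PortKilobytes) + SK_MIN_TXQ_SIZE;
}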
* * Returns: * 0: Queue Size Configuration valid @@ -553,7 +746,7 @@ int Port) /* port index */ { SK_GEPORT *pPrt; - int UsedMem; + int UsedMem; /* total memory used (max. found ports) */ int i; int Rtv; int Rtv2; @@ -564,27 +757,37 @@ for (i = 0; i < pAC->GIni.GIMacsFound; i++) { pPrt = &pAC->GIni.GP[i]; - if (( pPrt->PRxQSize & QZ_UNITS) || - (pPrt->PXSQSize & QZ_UNITS) || - (pPrt->PXAQSize & QZ_UNITS)) { + if ((pPrt->PRxQSize & QZ_UNITS) != 0 || + (pPrt->PXSQSize & QZ_UNITS) != 0 || + (pPrt->PXAQSize & QZ_UNITS) != 0) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG); - Rtv = 1; - goto CheckQSizeEnd; + return(1); } - UsedMem += pPrt->PRxQSize + pPrt->PXSQSize + pPrt->PXAQSize; - if (i == Port && pPrt->PRxQSize < SK_MIN_RXQ_SIZE) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E011, SKERR_HWI_E011MSG); - Rtv = 1; - goto CheckQSizeEnd; + return(1); } + + /* + * the size of at least one Tx queue (synch. or asynch.) has to be > 0. + * if Jumbo Frames are used, this size has to be >= 16 kB. + */ + if ((i == Port && pPrt->PXSQSize == 0 && pPrt->PXAQSize == 0) || + (pAC->GIni.GIPortUsage == SK_JUMBO_LINK && + ((pPrt->PXSQSize > 0 && pPrt->PXSQSize < SK_MIN_TXQ_SIZE) || + (pPrt->PXAQSize > 0 && pPrt->PXAQSize < SK_MIN_TXQ_SIZE)))) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E023, SKERR_HWI_E023MSG); + return(1); + } + + UsedMem += pPrt->PRxQSize + pPrt->PXSQSize + pPrt->PXAQSize; } + if (UsedMem > pAC->GIni.GIRamSize) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG); - Rtv = 1; - goto CheckQSizeEnd; + return(1); } /* Now start address calculation */ @@ -597,24 +800,23 @@ &pPrt->PRxQRamStart, &pPrt->PRxQRamEnd); Rtv |= Rtv2; - /* Calculate/Check values for the synchronous tx queue */ + /* Calculate/Check values for the synchronous Tx queue */ Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXSQSize, &StartAddr, &pPrt->PXsQRamStart, &pPrt->PXsQRamEnd); Rtv |= Rtv2; - /* Calculate/Check values for the asynchronous tx queue */ + /* Calculate/Check values for the asynchronous Tx queue */ Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXAQSize, &StartAddr, &pPrt->PXaQRamStart, &pPrt->PXaQRamEnd); Rtv |= Rtv2; if (Rtv) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E013, SKERR_HWI_E013MSG); - break; + return(1); } } -CheckQSizeEnd: - return (Rtv); + return(0); } /* SkGeCheckQSize */ @@ -625,10 +827,10 @@ * Description: * This function initializes the MAC Arbiter. * It must not be called if there is still an - * initilaized or active port. + * initialized or active port. * * Returns: - * nothing: + * nothing */ static void SkGeInitMacArb( SK_AC *pAC, /* adapter context */ @@ -652,7 +854,7 @@ /* Fast Output Enable Mode was intended to use with Rev. B2, but now? */ /* - * There is not start or enable buttom to push, therefore + * There is not start or enable button to push, therefore * the MAC arbiter is configured and enabled now. */ } /* SkGeInitMacArb */ @@ -665,10 +867,10 @@ * Description: * This function initializes the Packet Arbiter. * It must not be called if there is still an - * initilaized or active port. + * initialized or active port. 
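/*
 * Editorial sketch (not part of the patch): the size rules enforced by
 * SkGeCheckQSize() in the hunk above, expressed as one predicate.  Sizes
 * must be multiples of 8 kB (QZ_UNITS mask), the Rx queue needs at least
 * SK_MIN_RXQ_SIZE, at least one Tx queue must be non-empty, and with
 * Jumbo Frames any non-empty Tx queue needs at least SK_MIN_TXQ_SIZE.
 * The constants are the ones used above; the helper is illustrative only.
 */
static int SketchQSizeValid(
int RxKB,       /* receive queue size in kB */
int XsKB,       /* sync Tx queue size in kB */
int XaKB,       /* async Tx queue size in kB */
int JumboLink)  /* non-zero if port usage is SK_JUMBO_LINK */
{
        if ((RxKB & QZ_UNITS) != 0 || (XsKB & QZ_UNITS) != 0 ||
            (XaKB & QZ_UNITS) != 0) {
                return 0;       /* not specified in units of 8 kB */
        }
        if (RxKB < SK_MIN_RXQ_SIZE) {
                return 0;       /* Rx queue too small */
        }
        if (XsKB == 0 && XaKB == 0) {
                return 0;       /* at least one Tx queue is required */
        }
        if (JumboLink &&
            ((XsKB > 0 && XsKB < SK_MIN_TXQ_SIZE) ||
             (XaKB > 0 && XaKB < SK_MIN_TXQ_SIZE))) {
                return 0;       /* Jumbo Frames need 16 kB Tx queues */
        }
        return 1;
}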
* * Returns: - * nothing: + * nothing */ static void SkGeInitPktArb( SK_AC *pAC, /* adapter context */ @@ -693,7 +895,7 @@ SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1); } else { - SK_OUT16(IoC, B3_PA_CTRL,(PA_ENA_TO_TX1 | PA_ENA_TO_TX2)); + SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1 | PA_ENA_TO_TX2); } } } /* SkGeInitPktArb */ @@ -714,6 +916,9 @@ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { +#ifdef VCPU + SK_U32 DWord; +#endif /* VCPU */ /* * For each FIFO: * - release local reset @@ -721,19 +926,45 @@ * - setup defaults for the control register * - enable the FIFO */ - /* Configure RX MAC FIFO */ - SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_CLR); - SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_RX_CTRL_DEF); - SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_ENA_OP_MD); - - /* Configure TX MAC FIFO */ - SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_CLR); - SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); - SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_ENA_OP_MD); - - /* Enable frame flushing if jumbo frames used */ - if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { - SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + + if (pAC->GIni.GIGenesis) { + /* Configure Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_RX_CTRL_DEF); + SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_CLR); + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Enable frame flushing if jumbo frames used */ + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } + } + else { + /* Configure Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); + SK_OUT32(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), GMF_RX_CTRL_DEF | + GMF_RX_F_FL_ON); /* enable Rx GMAC FIFO Flush */ + + /* set Rx GMAC FIFO Flush Mask */ + SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_MSK), (SK_U16)RX_FF_FL_DEF_MSK); + + /* use Rx GMAC FIFO Flush Threshold default value (0x0a == 56 bytes) */ + + /* Configure Tx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR); + SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), GMF_TX_CTRL_DEF); + +#ifdef VCPU + SK_IN32(IoC, MR_ADDR(Port, RX_GMF_AF_THR), &DWord); + SK_IN32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), &DWord); +#endif /* VCPU */ + + /* set Tx GMAC FIFO Almost Empty Threshold */ +/* SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), 0); */ } } /* SkGeInitMacFifo */ @@ -750,11 +981,11 @@ * * Note: * o To ensure receiving the Link Sync Event the LinkSyncCounter - * should be initialized BEFORE clearing the XMACs reset! + * should be initialized BEFORE clearing the XMAC's reset! * o Enable IS_LNK_SYNC_M1 and IS_LNK_SYNC_M2 after calling this * function. * - * Retruns: + * Returns: * nothing */ void SkGeLoadLnkSyncCnt( @@ -784,13 +1015,13 @@ SK_IN32(IoC, B0_IMSK, &OrgIMsk); if (Port == MAC_1) { NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M1; - if (ISrc & IS_LNK_SYNC_M1) { + if ((ISrc & IS_LNK_SYNC_M1) != 0) { IrqPend = SK_TRUE; } } else { NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M2; - if (ISrc & IS_LNK_SYNC_M2) { + if ((ISrc & IS_LNK_SYNC_M2) != 0) { IrqPend = SK_TRUE; } } @@ -829,14 +1060,14 @@ * TXA_ENA_FSYNC. 
This means if the size of * the synchronous queue is unequal zero but no specific * synchronous bandwidth is configured, the synchronous queue - * will always have the 'unlimitted' transmit priority! + * will always have the 'unlimited' transmit priority! * * This mode will be restored if the synchronous bandwidth is * deallocated ('IntTime' = 0 and 'LimCount' = 0). * * Returns: * 0: success - * 1: paramter configuration error + * 1: parameter configuration error * 2: try to configure quality of service although no * synchronous queue is configured */ @@ -855,59 +1086,59 @@ /* check the parameters */ if (LimCount > IntTime || (LimCount == 0 && IntTime != 0) || - (LimCount !=0 && IntTime == 0)) { + (LimCount != 0 && IntTime == 0)) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG); - Rtv = 1; - goto CfgSyncEnd; + return(1); } - if (pAC->GIni.GP[Port].PXSQSize != 0) { - /* calculate register values */ - IntTime = (IntTime / 2) * pAC->GIni.GIHstClkFact / 100; - LimCount = LimCount / 8; - if (IntTime > TXA_MAX_VAL || LimCount > TXA_MAX_VAL) { - SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG); - Rtv = 1; - goto CfgSyncEnd; - } - - /* - * - Enable 'Force Sync' to ensure the synchronous queue - * has the priority while configuring the new values. - * - Also 'disable alloc' to ensure the settings complies - * to the SyncMode parameter. - * - Disable 'Rate Control' to configure the new values. - * - write IntTime and Limcount - * - start 'Rate Control' and disable 'Force Sync' - * if Interval Timer or Limit Counter not zero. - */ - SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), - TXA_ENA_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); - SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), IntTime); - SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), LimCount); - SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), - (SyncMode & (TXA_ENA_ALLOC|TXA_DIS_ALLOC))); - if (IntTime != 0 || LimCount != 0) { - SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), - TXA_DIS_FSYNC|TXA_START_RC); - } - } - else { + + if (pAC->GIni.GP[Port].PXSQSize == 0) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E009, SKERR_HWI_E009MSG); - Rtv = 2; + return(2); + } + + /* calculate register values */ + IntTime = (IntTime / 2) * pAC->GIni.GIHstClkFact / 100; + LimCount = LimCount / 8; + + if (IntTime > TXA_MAX_VAL || LimCount > TXA_MAX_VAL) { + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG); + return(1); + } + + /* + * - Enable 'Force Sync' to ensure the synchronous queue + * has the priority while configuring the new values. + * - Also 'disable alloc' to ensure the settings complies + * to the SyncMode parameter. + * - Disable 'Rate Control' to configure the new values. + * - write IntTime and LimCount + * - start 'Rate Control' and disable 'Force Sync' + * if Interval Timer or Limit Counter not zero. 
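/*
 * Editorial sketch (not part of the patch): the parameter check and the
 * register scaling that SkGeCfgSync() performs above, pulled out for
 * clarity.  Either both values are zero (bandwidth deallocation) or both
 * are non-zero with LimCount <= IntTime; the interval is then scaled by
 * the host clock factor and the limit converted to 8-byte units, and both
 * results must fit into TXA_MAX_VAL.  Types and constants are those used
 * above; the helper itself is illustrative.
 */
static int SketchCfgSyncParams(
SK_U32 IntTime,         /* interval timer value */
SK_U32 LimCount,        /* byte limit per interval */
SK_U32 HstClkFact,      /* pAC->GIni.GIHstClkFact */
SK_U32 *pItiReg,        /* out: TXA_ITI_INI register value */
SK_U32 *pLimReg)        /* out: TXA_LIM_INI register value */
{
        if (LimCount > IntTime ||
            (LimCount == 0 && IntTime != 0) ||
            (LimCount != 0 && IntTime == 0)) {
                return 1;       /* parameter configuration error */
        }

        *pItiReg = (IntTime / 2) * HstClkFact / 100;
        *pLimReg = LimCount / 8;

        if (*pItiReg > TXA_MAX_VAL || *pLimReg > TXA_MAX_VAL) {
                return 1;       /* values do not fit into the registers */
        }
        return 0;
}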
+ */ + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), + TXA_ENA_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), IntTime); + SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), LimCount); + + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), + (SK_U8)(SyncMode & (TXA_ENA_ALLOC | TXA_DIS_ALLOC))); + + if (IntTime != 0 || LimCount != 0) { + SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_DIS_FSYNC | TXA_START_RC); } -CfgSyncEnd: - return (Rtv); + return(0); } /* SkGeCfgSync */ /****************************************************************************** * - * DoInitRamQueue() - Initilaize the RAM Buffer Address of a single Queue + * DoInitRamQueue() - Initialize the RAM Buffer Address of a single Queue * * Desccription: - * If the queue is used, enable and initilaize it. + * If the queue is used, enable and initialize it. * Make sure the queue is still reset, if it is not used. * * Returns: @@ -952,22 +1183,21 @@ /* write threshold for Rx Queue */ SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_UTPP), RxUpThresVal); - SK_OUT32(IoC, RB_ADDR(QuIoOffs,RB_RX_LTPP), RxLoThresVal); + SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_LTPP), RxLoThresVal); /* the high priority threshold not used */ break; case SK_TX_RAM_Q: /* - * Do NOT use Store and forward under normal - * operation due to performance optimization. - * But if Jumbo frames are configured we NEED - * the store and forward of the RAM buffer. + * Do NOT use Store & Forward under normal operation due to + * performance optimization (GENESIS only). + * But if Jumbo Frames are configured (XMAC Tx FIFO is only 4 kB) + * or YUKON is used ((GMAC Tx FIFO is only 1 kB) + * we NEED Store & Forward of the RAM buffer. */ - if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { - /* - * enable Store & Forward Mode for the - * Tx Side - */ + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK || + !pAC->GIni.GIGenesis) { + /* enable Store & Forward Mode for the Tx Side */ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_STFWD); } break; @@ -980,7 +1210,7 @@ /* ensure the queue is still disabled */ SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_SET); } -} /* DoInitRamQueue*/ +} /* DoInitRamQueue */ /****************************************************************************** @@ -1012,10 +1242,13 @@ DoInitRamQueue(pAC, IoC, pPrt->PRxQOff, pPrt->PRxQRamStart, pPrt->PRxQRamEnd, RxQType); + DoInitRamQueue(pAC, IoC, pPrt->PXsQOff, pPrt->PXsQRamStart, pPrt->PXsQRamEnd, SK_TX_RAM_Q); + DoInitRamQueue(pAC, IoC, pPrt->PXaQOff, pPrt->PXaQRamStart, pPrt->PXaQRamEnd, SK_TX_RAM_Q); + } /* SkGeInitRamBufs */ @@ -1024,7 +1257,7 @@ * SkGeInitRamIface() - Initialize the RAM Interface * * Description: - * This function initializes the Adapbers RAM Interface. + * This function initializes the Adapters RAM Interface. * * Note: * This function is used in the diagnostics. 
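/*
 * Editorial sketch (not part of the patch): the store-and-forward decision
 * taken in DoInitRamQueue() above.  GENESIS normally runs the Tx RAM
 * buffer without Store & Forward for performance, but Jumbo Frames (the
 * XMAC Tx FIFO is only 4 kB) or a YUKON chip (the GMAC Tx FIFO is only
 * 1 kB) require it.  Fields and constants are those used above; the
 * helper is illustrative only.
 */
static SK_BOOL SketchNeedTxStoreForward(
SK_AC *pAC)     /* adapter context */
{
        return (pAC->GIni.GIPortUsage == SK_JUMBO_LINK ||
                !pAC->GIni.GIGenesis) ? SK_TRUE : SK_FALSE;
}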
@@ -1052,6 +1285,7 @@ SK_OUT8(IoC, B3_RI_RTO_R2, SK_RI_TO_53); SK_OUT8(IoC, B3_RI_RTO_XA2, SK_RI_TO_53); SK_OUT8(IoC, B3_RI_RTO_XS2, SK_RI_TO_53); + } /* SkGeInitRamIface */ @@ -1070,25 +1304,37 @@ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { - SK_GEPORT *pPrt; + SK_GEPORT *pPrt; + SK_U32 RxWm; + SK_U32 TxWm; pPrt = &pAC->GIni.GP[Port]; + RxWm = SK_BMU_RX_WM; + TxWm = SK_BMU_TX_WM; + + if (!pAC->GIni.GIPciSlot64 && !pAC->GIni.GIPciClock66) { + /* for better performance */ + RxWm /= 2; + TxWm /= 2; + } + /* Rx Queue: Release all local resets and set the watermark */ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_CLR_RESET); - SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_F), SK_BMU_RX_WM); + SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_F), RxWm); /* - * Tx Queue: Release all local resets if the queue is used! + * Tx Queue: Release all local resets if the queue is used ! * set watermark */ if (pPrt->PXSQSize != 0) { SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_CLR_RESET); - SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_F), SK_BMU_TX_WM); + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_F), TxWm); } + if (pPrt->PXAQSize != 0) { SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_CLR_RESET); - SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_F), SK_BMU_TX_WM); + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_F), TxWm); } /* * Do NOT enable the descriptor poll timers here, because @@ -1107,7 +1353,7 @@ * that RX/TX stop is done and SV idle is NOT set. * In this case we have to issue another stop command. * - * Retruns: + * Returns: * The queues control status register */ static SK_U32 TestStopBit( @@ -1118,12 +1364,16 @@ SK_U32 QuCsr; /* CSR contents */ SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr); - if ((QuCsr & (CSR_STOP|CSR_SV_IDLE)) == 0) { + + if ((QuCsr & (CSR_STOP | CSR_SV_IDLE)) == 0) { + /* Stop Descriptor overridden by start command */ SK_OUT32(IoC, Q_ADDR(QuIoOffs, Q_CSR), CSR_STOP); + SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr); } - return (QuCsr); -} /* TestStopBit*/ + + return(QuCsr); +} /* TestStopBit */ /****************************************************************************** @@ -1131,31 +1381,31 @@ * SkGeStopPort() - Stop the Rx/Tx activity of the port 'Port'. * * Description: - * After calling this function the descriptor rings and rx and tx + * After calling this function the descriptor rings and Rx and Tx * queues of this port may be reconfigured. * - * It is possible to stop the receive and transmit path seperate or + * It is possible to stop the receive and transmit path separate or * both together. * - * Dir = SK_STOP_TX Stops the transmit path only and resets - * the XMAC. The receive queue is still and - * the pending rx frames may still transfered + * Dir = SK_STOP_TX Stops the transmit path only and resets the MAC. + * The receive queue is still active and + * the pending Rx frames may be still transferred * into the RxD. * SK_STOP_RX Stop the receive path. The tansmit path - * has to be stoped once before. + * has to be stopped once before. * SK_STOP_ALL SK_STOP_TX + SK_STOP_RX * - * RstMode=SK_SOFT_RST Resets the XMAC. The PHY is still alive. - * SK_HARD_RST Resets the XMAC and the PHY. + * RstMode = SK_SOFT_RST Resets the MAC. The PHY is still alive. + * SK_HARD_RST Resets the MAC and the PHY. * * Example: * 1) A Link Down event was signaled for a port. Therefore the activity - * of this port should be stoped and a hardware reset should be issued + * of this port should be stopped and a hardware reset should be issued * to enable the workaround of XMAC errata #2. 
But the received frames * should not be discarded. * ... * SkGeStopPort(pAC, IoC, Port, SK_STOP_TX, SK_HARD_RST); - * (transfer all pending rx frames) + * (transfer all pending Rx frames) * SkGeStopPort(pAC, IoC, Port, SK_STOP_RX, SK_HARD_RST); * ... * @@ -1170,29 +1420,29 @@ * * Extended Description: * If SK_STOP_TX is set, - * o disable the XMACs receive and transmiter to prevent + * o disable the MAC's receive and transmitter to prevent * from sending incomplete frames * o stop the port's transmit queues before terminating the * BMUs to prevent from performing incomplete PCI cycles * on the PCI bus - * - The network rx and tx activity and PCI tx transfer is + * - The network Rx and Tx activity and PCI Tx transfer is * disabled now. - * o reset the XMAC depending on the RstMode + * o reset the MAC depending on the RstMode * o Stop Interval Timer and Limit Counter of Tx Arbiter, * also disable Force Sync bit and Enable Alloc bit. - * o perform a local reset of the port's tx path - * - reset the PCI FIFO of the async tx queue - * - reset the PCI FIFO of the sync tx queue - * - reset the RAM Buffer async tx queue - * - reset the RAM Butter sync tx queue + * o perform a local reset of the port's Tx path + * - reset the PCI FIFO of the async Tx queue + * - reset the PCI FIFO of the sync Tx queue + * - reset the RAM Buffer async Tx queue + * - reset the RAM Buffer sync Tx queue * - reset the MAC Tx FIFO * o switch Link and Tx LED off, stop the LED counters * * If SK_STOP_RX is set, * o stop the port's receive queue * - The path data transfer activity is fully stopped now. - * o perform a local reset of the port's rx path - * - reset the PCI FIFO of the rx queue + * o perform a local reset of the port's Rx path + * - reset the PCI FIFO of the Rx queue * - reset the RAM Buffer receive queue * - reset the MAC Rx FIFO * o switch Rx LED off, stop the LED counter @@ -1204,36 +1454,30 @@ * o This function may be called during the driver states RESET_PORT and * SWITCH_PORT. */ -void SkGeStopPort( +void SkGeStopPort( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* I/O context */ int Port, /* port to stop (MAC_1 + n) */ int Dir, /* Direction to Stop (SK_STOP_RX, SK_STOP_TX, SK_STOP_ALL) */ int RstMode)/* Reset Mode (SK_SOFT_RST, SK_HARD_RST) */ { -#ifndef SK_DIAG +#ifndef SK_DIAG SK_EVPARA Para; -#endif /* !SK_DIAG */ +#endif /* !SK_DIAG */ SK_GEPORT *pPrt; SK_U32 DWord; - SK_U16 Word; SK_U32 XsCsr; SK_U32 XaCsr; int i; - SK_BOOL AllPortsDis; SK_U64 ToutStart; int ToutCnt; pPrt = &pAC->GIni.GP[Port]; - if (Dir & SK_STOP_TX) { - /* disable the XMACs receiver and transmitter */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Word); - XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); - - /* dummy read to ensure writing */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Word); - + if ((Dir & SK_STOP_TX) != 0) { + /* disable receiver and transmitter */ + SkMacRxTxDisable(pAC, IoC, Port); + /* stop both transmit queues */ /* * If the BMU is in the reset state CSR_STOP will terminate @@ -1249,21 +1493,14 @@ * Clear packet arbiter timeout to make sure * this loop will terminate. */ - if (Port == MAC_1) { - Word = PA_CLR_TO_TX1; - } - else { - Word = PA_CLR_TO_TX2; - } - SK_OUT16(IoC, B3_PA_CTRL, Word); + SK_OUT16(IoC, B3_PA_CTRL, (Port == MAC_1) ? PA_CLR_TO_TX1 : + PA_CLR_TO_TX2); /* - * If the transfer stucks at the XMAC the STOP command will not - * terminate if we don't flush the XMAC's transmit FIFO! 
+ * If the transfer stucks at the MAC the STOP command will not + * terminate if we don't flush the XMAC's transmit FIFO ! */ - XM_IN32(IoC, Port, XM_MODE, &DWord); - DWord |= XM_MD_FTF; - XM_OUT32(IoC, Port, XM_MODE, DWord); + SkMacFlushTxFifo(pAC, IoC, Port); XsCsr = TestStopBit(pAC, IoC, pPrt->PXsQOff); XaCsr = TestStopBit(pAC, IoC, pPrt->PXaQOff); @@ -1274,40 +1511,32 @@ * This needs to be checked at 1/18 sec only. */ ToutCnt++; - switch (ToutCnt) { - case 1: - /* - * Cache Incoherency workaround: Assume a start command - * has been lost while sending the frame. - */ - ToutStart = SkOsGetTime(pAC); - if (XsCsr & CSR_STOP) { - SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_START); - } - if (XaCsr & CSR_STOP) { - SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_START); - } - break; - case 2: - default: + if (ToutCnt > 1) { /* Might be a problem when the driver event handler - * calls StopPort again. - * XXX. + * calls StopPort again. XXX. */ /* Fatal Error, Loop aborted */ - /* Create an Error Log Entry */ - SK_ERR_LOG( - pAC, - SK_ERRCL_HW, - SKERR_HWI_E018, + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E018, SKERR_HWI_E018MSG); #ifndef SK_DIAG Para.Para64 = Port; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); -#endif /* !SK_DIAG */ +#endif /* !SK_DIAG */ return; } + /* + * Cache incoherency workaround: Assume a start command + * has been lost while sending the frame. + */ + ToutStart = SkOsGetTime(pAC); + + if ((XsCsr & CSR_STOP) != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_START); + } + if ((XaCsr & CSR_STOP) != 0) { + SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_START); + } } /* @@ -1315,46 +1544,51 @@ * required to wait until CSR_STOP is reset and CSR_SV_IDLE is set. */ } while ((XsCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE || - (XaCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE); + (XaCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE); - /* reset the XMAC depending on the RstMode */ + /* Reset the MAC depending on the RstMode */ if (RstMode == SK_SOFT_RST) { - SkXmSoftRst(pAC, IoC, Port); + SkMacSoftRst(pAC, IoC, Port); } else { - SkXmHardRst(pAC, IoC, Port); + SkMacHardRst(pAC, IoC, Port); } - - /* - * Stop Interval Timer and Limit Counter of Tx Arbiter, - * also disable Force Sync bit and Enable Alloc bit. - */ + + /* Disable Force Sync bit and Enable Alloc bit */ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); - SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), 0x00000000L); - SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), 0x00000000L); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), 0L); + SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), 0L); - /* - * perform a local reset of the port's tx path - * - reset the PCI FIFO of the async tx queue - * - reset the PCI FIFO of the sync tx queue - * - reset the RAM Buffer async tx queue - * - reset the RAM Butter sync tx queue - * - reset the MAC Tx FIFO - */ + /* Perform a local reset of the port's Tx path */ + + /* Reset the PCI FIFO of the async Tx queue */ SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_SET_RESET); + /* Reset the PCI FIFO of the sync Tx queue */ SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_SET_RESET); + /* Reset the RAM Buffer async Tx queue */ SK_OUT8(IoC, RB_ADDR(pPrt->PXaQOff, RB_CTRL), RB_RST_SET); + /* Reset the RAM Buffer sync Tx queue */ SK_OUT8(IoC, RB_ADDR(pPrt->PXsQOff, RB_CTRL), RB_RST_SET); - /* Note: MFF_RST_SET does NOT reset the XMAC! 
*/ - SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_SET); - - /* switch Link and Tx LED off, stop the LED counters */ - /* Link LED is switched off by the RLMT and the Diag itself */ - SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_DIS); + + /* Reset Tx MAC FIFO */ + if (pAC->GIni.GIGenesis) { + /* Note: MFF_RST_SET does NOT reset the XMAC ! */ + SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_SET); + + /* switch Link and Tx LED off, stop the LED counters */ + /* Link LED is switched off by the RLMT and the Diag itself */ + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_DIS); + } + else { + /* Reset TX MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_SET); + } } - if (Dir & SK_STOP_RX) { + if ((Dir & SK_STOP_RX) != 0) { /* * The RX Stop Command will not terminate if no buffers * are queued in the RxD ring. But it will always reach @@ -1363,64 +1597,52 @@ */ /* stop the port's receive queue */ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_STOP); + i = 100; do { /* * Clear packet arbiter timeout to make sure * this loop will terminate */ - if (Port == MAC_1) { - Word = PA_CLR_TO_RX1; - } - else { - Word = PA_CLR_TO_RX2; - } - SK_OUT16(IoC, B3_PA_CTRL, Word); - + SK_OUT16(IoC, B3_PA_CTRL, (Port == MAC_1) ? PA_CLR_TO_RX1 : + PA_CLR_TO_RX2); + DWord = TestStopBit(pAC, IoC, pPrt->PRxQOff); - if (i != 0) { - i--; - } - /* finish if CSR_STOP is done or CSR_SV_IDLE is true and i==0 */ + /* timeout if i==0 (bug fix for #10748) */ + if (--i == 0) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E024, + SKERR_HWI_E024MSG); + break; + } /* * because of the ASIC problem report entry from 21.08.98 * it is required to wait until CSR_STOP is reset and * CSR_SV_IDLE is set. */ - } while ((DWord & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE && - ((DWord & CSR_SV_IDLE) == 0 || i != 0)); + } while ((DWord & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE); - /* The path data transfer activity is fully stopped now. */ + /* The path data transfer activity is fully stopped now */ - /* - * perform a local reset of the port's rx path - * - reset the PCI FIFO of the rx queue - * - reset the RAM Buffer receive queue - * - reset the MAC Rx FIFO - */ + /* Perform a local reset of the port's Rx path */ + + /* Reset the PCI FIFO of the Rx queue */ SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_SET_RESET); + /* Reset the RAM Buffer receive queue */ SK_OUT8(IoC, RB_ADDR(pPrt->PRxQOff, RB_CTRL), RB_RST_SET); - SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_SET); - - /* switch Rx LED off, stop the LED counter */ - SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_DIS); - - } - /* - * If all ports are stopped reset the RAM Interface. 
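/*
 * Editorial sketch (not part of the patch): the bounded wait that the new
 * Rx-stop code above implements (bug fix for #10748).  The queue CSR is
 * polled via TestStopBit() until CSR_SV_IDLE is set with CSR_STOP clear,
 * the packet arbiter Rx timeout is cleared on every pass, and the loop
 * gives up after a fixed number of iterations instead of spinning forever.
 * Everything used here appears in the hunk above; the helper itself is
 * illustrative (the real code logs SKERR_HWI_E024 on timeout).
 */
static int SketchWaitRxQueueStopped(
SK_AC *pAC,     /* adapter context */
SK_IOC IoC,     /* IO context */
int Port,       /* port index (MAC_1 + n) */
int QuIoOffs)   /* Rx queue IO offset */
{
        SK_U32 Csr;
        int i = 100;    /* same bound as the code above */

        do {
                /* clear the packet arbiter Rx timeout, as above */
                SK_OUT16(IoC, B3_PA_CTRL, (Port == MAC_1) ?
                        PA_CLR_TO_RX1 : PA_CLR_TO_RX2);

                Csr = TestStopBit(pAC, IoC, QuIoOffs);

                if (--i == 0) {
                        return 1;       /* timed out */
                }
        } while ((Csr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE);

        return 0;
}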
- */ - for (i = 0, AllPortsDis = SK_TRUE; i < pAC->GIni.GIMacsFound; i++) { - if (pAC->GIni.GP[i].PState != SK_PRT_RESET && - pAC->GIni.GP[i].PState != SK_PRT_STOP) { + /* Reset Rx MAC FIFO */ + if (pAC->GIni.GIGenesis) { + + SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_SET); - AllPortsDis = SK_FALSE; - break; + /* switch Rx LED off, stop the LED counter */ + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_DIS); + } + else { + /* Reset Rx MAC FIFO */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_SET); } - } - if (AllPortsDis) { - pAC->GIni.GIAnyPortAct = SK_FALSE; } } /* SkGeStopPort */ @@ -1444,12 +1666,12 @@ for (i = 0; i < SK_MAX_MACS; i++) { pPrt = &pAC->GIni.GP[i]; + pPrt->PState = SK_PRT_RESET; pPrt->PRxQOff = QOffTab[i].RxQOff; pPrt->PXsQOff = QOffTab[i].XsQOff; pPrt->PXaQOff = QOffTab[i].XaQOff; pPrt->PCheckPar = SK_FALSE; - pPrt->PRxCmd = XM_RX_STRIP_FCS | XM_RX_LENERR_OK; pPrt->PIsave = 0; pPrt->PPrevShorts = 0; pPrt->PLinkResCt = 0; @@ -1458,6 +1680,9 @@ pPrt->PPrevFcs = 0; pPrt->PRxLim = SK_DEF_RX_WA_LIM; pPrt->PLinkMode = SK_LMODE_AUTOFULL; + pPrt->PLinkSpeedCap = SK_LSPEED_CAP_1000MBPS; + pPrt->PLinkSpeed = SK_LSPEED_1000MBPS; + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_UNKNOWN; pPrt->PLinkModeConf = SK_LMODE_AUTOSENSE; pPrt->PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM; pPrt->PLinkBroken = SK_TRUE; /* See WA code */ @@ -1466,8 +1691,7 @@ pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; pPrt->PFlowCtrlCap = SK_FLOW_MODE_SYM_OR_REM; pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; - pPrt->PMSCap = (SK_MS_CAP_AUTO | SK_MS_CAP_MASTER | - SK_MS_CAP_SLAVE); + pPrt->PMSCap = 0; pPrt->PMSMode = SK_MS_MODE_AUTO; pPrt->PMSStatus = SK_MS_STAT_UNSET; pPrt->PAutoNegFail = SK_FALSE; @@ -1476,7 +1700,7 @@ } pAC->GIni.GIPortUsage = SK_RED_LINK; - pAC->GIni.GIAnyPortAct = SK_FALSE; + } /* SkGeInit0*/ #ifdef SK_PCI_RESET @@ -1520,42 +1744,39 @@ /* We know the RAM Interface Arbiter is enabled. */ SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3); SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts); - if ((PmCtlSts & PCI_PM_STATE) != PCI_PM_STATE_D3) { - return (1); + + if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) { + return(1); } - /* - * Return to D0 state. - */ + /* Return to D0 state. */ SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0); /* Check for D0 state. */ SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts); - if ((PmCtlSts & PCI_PM_STATE) != PCI_PM_STATE_D0) { - return (1); + + if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) { + return(1); } - /* - * Check PCI Config Registers. - */ + /* Check PCI Config Registers. */ SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd); SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls); SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1); SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2); - SkPciReadCfgByte(pAC, PCI_LAT_TIM, &lat); + SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat); + if (PciCmd != 0 || Cls != 0 || (Bp1 & 0xfffffff0L) != 0 || Bp2 != 1 || - Lat != 0 ) { - return (0); + Lat != 0) { + return(1); } - /* - * Restore Config Space. - */ + /* Restore PCI Config Space. */ for (i = 0; i < PCI_CFG_SIZE; i++) { SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]); } - return (0); + return(0); } /* SkGePciReset */ #endif /* SK_PCI_RESET */ @@ -1572,13 +1793,14 @@ * o Get the hardware configuration * + Read the number of MACs/Ports. * + Read the RAM size. - * + Read the PCI Revision ID. + * + Read the PCI Revision Id. 
* + Find out the adapters host clock speed * + Read and check the PHY type * * Returns: * 0: success * 5: Unexpected PHY type detected + * 6: HW self test failed */ static int SkGeInit1( SK_AC *pAC, /* adapter context */ @@ -1595,56 +1817,113 @@ (void)SkGePciReset(pAC, IoC); #endif /* SK_PCI_RESET */ - /* Do the reset */ + /* Do the SW-reset */ SK_OUT8(IoC, B0_CTST, CS_RST_SET); - /* Release the reset */ + /* Release the SW-reset */ SK_OUT8(IoC, B0_CTST, CS_RST_CLR); /* Reset all error bits in the PCI STATUS register */ /* - * Note: Cfg cycles cannot be used, because they are not + * Note: PCI Cfg cycles cannot be used, because they are not * available on some platforms after 'boot time'. */ - SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); + + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); SK_OUT16(IoC, PCI_C(PCI_STATUS), Word | PCI_ERRBITS); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - /* Release Master_Reset */ + /* Release Master Reset */ SK_OUT8(IoC, B0_CTST, CS_MRST_CLR); + /* Read Chip Identification Number */ + SK_IN8(IoC, B2_CHIP_ID, &Byte); + pAC->GIni.GIChipId = Byte; + /* Read number of MACs */ SK_IN8(IoC, B2_MAC_CFG, &Byte); - if (Byte & CFG_SNG_MAC) { - pAC->GIni.GIMacsFound = 1; - } - else { - pAC->GIni.GIMacsFound = 2; - } - SK_IN8(IoC, PCI_C(PCI_REV_ID), &Byte); - pAC->GIni.GIPciHwRev = (int) Byte; + pAC->GIni.GIMacsFound = (Byte & CFG_SNG_MAC) ? 1 : 2; + + /* Get Chip Revision Number */ + pAC->GIni.GIChipRev = (SK_U8)((Byte & CFG_CHIP_R_MSK) >> 4); - /* Read the adapters RAM size */ + /* Read the adapters external SRAM size */ SK_IN8(IoC, B2_E_0, &Byte); - if (Byte == 3) { - pAC->GIni.GIRamSize = (int)(Byte-1) * 512; - pAC->GIni.GIRamOffs = (SK_U32)512 * 1024; + + if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) { + + pAC->GIni.GIGenesis = SK_TRUE; + + if (Byte == 3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + pAC->GIni.GIRamSize = 1024; + pAC->GIni.GIRamOffs = (SK_U32)512 * 1024; + } + else { + pAC->GIni.GIRamSize = (int)Byte * 512; + pAC->GIni.GIRamOffs = 0; + } } else { - pAC->GIni.GIRamSize = (int)Byte * 512; + + pAC->GIni.GIGenesis = SK_FALSE; + +#ifndef VCPU + pAC->GIni.GIRamSize = (Byte == 0) ? 128 : (int)Byte * 4; +#else + pAC->GIni.GIRamSize = 128; +#endif pAC->GIni.GIRamOffs = 0; + + pAC->GIni.GIWolOffs = (pAC->GIni.GIChipRev == 0) ? WOL_REG_OFFS : 0; + + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + /* set GMAC Link Control reset */ + SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_SET); + + /* clear GMAC Link Control reset */ + SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } } - /* All known GE Adapters works with 53.125 MHz host clock */ + /* get diff. PCI parameters */ + SK_IN16(IoC, B0_CTST, &Word); + + /* Check if 64-bit PCI Slot is present */ + pAC->GIni.GIPciSlot64 = (SK_BOOL)((Word & CS_BUS_SLOT_SZ) != 0); + + /* Check if 66 MHz PCI Clock is active */ + pAC->GIni.GIPciClock66 = (SK_BOOL)((Word & CS_BUS_CLOCK) != 0); + + /* Check if VAUX is available */ + pAC->GIni.GIVauxAvail = (SK_BOOL)((Word & CS_VAUX_AVAIL) != 0); + + /* Read PCI HW Revision Id. 
*/ + SK_IN8(IoC, PCI_C(PCI_REV_ID), &Byte); + pAC->GIni.GIPciHwRev = Byte; + + /* All known GE Adapters work with 53.125 MHz host clock */ pAC->GIni.GIHstClkFact = SK_FACT_53; pAC->GIni.GIPollTimerVal = SK_DPOLL_DEF * (SK_U32)pAC->GIni.GIHstClkFact / 100; + /* Read the PMD type */ + SK_IN8(IoC, B2_PMD_TYP, &Byte); + pAC->GIni.GICopperType = (SK_U8)(Byte == 'T'); + /* Read the PHY type */ SK_IN8(IoC, B2_E_1, &Byte); + +#ifdef VCPU + if (!pAC->GIni.GIGenesis) { + pAC->GIni.GICopperType = SK_TRUE; + Byte = SK_PHY_MARV_COPPER; /* this field is not initialized */ + } +#endif + Byte &= 0x0f; /* the PHY type is stored in the lower nibble */ - for (i=0; iGIni.GIMacsFound; i++) { + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { pAC->GIni.GP[i].PhyType = Byte; switch (Byte) { case SK_PHY_XMAC: @@ -1652,25 +1931,71 @@ break; case SK_PHY_BCOM: pAC->GIni.GP[i].PhyAddr = PHY_ADDR_BCOM; + pAC->GIni.GP[i].PMSCap = + SK_MS_CAP_AUTO | SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE; break; + case SK_PHY_MARV_COPPER: + pAC->GIni.GP[i].PhyAddr = PHY_ADDR_MARV; + if (pAC->GIni.GICopperType) { + pAC->GIni.GP[i].PLinkSpeedCap = SK_LSPEED_CAP_AUTO | + SK_LSPEED_CAP_10MBPS | SK_LSPEED_CAP_100MBPS | + SK_LSPEED_CAP_1000MBPS; + pAC->GIni.GP[i].PLinkSpeed = SK_LSPEED_AUTO; + pAC->GIni.GP[i].PMSCap = + SK_MS_CAP_AUTO | SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE; + } + else { + pAC->GIni.GP[i].PhyType = SK_PHY_MARV_FIBER; + } + break; +#ifdef OTHER_PHY case SK_PHY_LONE: pAC->GIni.GP[i].PhyAddr = PHY_ADDR_LONE; break; case SK_PHY_NAT: pAC->GIni.GP[i].PhyAddr = PHY_ADDR_NAT; break; +#endif /* OTHER_PHY */ default: - /* ERROR: unexpected PHY typ detected */ + /* ERROR: unexpected PHY type detected */ RetVal = 5; break; } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, + ("PHY type: %d PHY addr: %04x\n", pAC->GIni.GP[i].PhyType, + pAC->GIni.GP[i].PhyAddr)); } - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT, - ("PHY type: %d PHY addr: %x\n", pAC->GIni.GP[i].PhyType, - pAC->GIni.GP[i].PhyAddr)); + + /* Get Mac Type & set function pointers dependent on */ + if (pAC->GIni.GIGenesis) { + pAC->GIni.GIMacType = SK_MAC_XMAC; + + pAC->GIni.GIFunc.pFnMacUpdateStats = SkXmUpdateStats; + pAC->GIni.GIFunc.pFnMacStatistic = SkXmMacStatistic; + pAC->GIni.GIFunc.pFnMacResetCounter = SkXmResetCounter; + pAC->GIni.GIFunc.pFnMacOverflow = SkXmOverflowStatus; + } + else { + pAC->GIni.GIMacType = SK_MAC_GMAC; - return (RetVal); -} /* SkGeInit1*/ + pAC->GIni.GIFunc.pFnMacUpdateStats = SkGmUpdateStats; + pAC->GIni.GIFunc.pFnMacStatistic = SkGmMacStatistic; + pAC->GIni.GIFunc.pFnMacResetCounter = SkGmResetCounter; + pAC->GIni.GIFunc.pFnMacOverflow = SkGmOverflowStatus; + +#ifndef VCPU + if (pAC->GIni.GIChipId == CHIP_ID_YUKON) { + /* check HW self test result */ + SK_IN8(IoC, B2_E_3, &Byte); + if ((Byte & B2_E3_RES_MASK) != 0) { + RetVal = 6; + } + } +#endif + } + return(RetVal); +} /* SkGeInit1 */ /****************************************************************************** @@ -1692,64 +2017,48 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC) /* IO context */ { - SK_GEPORT *pPrt; SK_U32 DWord; - int i; - - /* start the Blink Source Counter */ - DWord = SK_BLK_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100; - SK_OUT32(IoC, B2_BSC_INI, DWord); - SK_OUT8(IoC, B2_BSC_CTRL, BSC_START); + int i; /* start the Descriptor Poll Timer */ if (pAC->GIni.GIPollTimerVal != 0) { if (pAC->GIni.GIPollTimerVal > SK_DPOLL_MAX) { pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX; - /* Create an Error Log Entry */ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E017, SKERR_HWI_E017MSG); } SK_OUT32(IoC, 
B28_DPT_INI, pAC->GIni.GIPollTimerVal); SK_OUT8(IoC, B28_DPT_CTRL, DPT_START); } - /* - * Configure - * - the MAC-Arbiter and - * - the Paket Arbiter - * - * The MAC and the packet arbiter will be started once - * and never be stopped. - */ - SkGeInitMacArb(pAC, IoC); - SkGeInitPktArb(pAC, IoC); + if (pAC->GIni.GIGenesis) { + /* start the Blink Source Counter */ + DWord = SK_BLK_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100; + + SK_OUT32(IoC, B2_BSC_INI, DWord); + SK_OUT8(IoC, B2_BSC_CTRL, BSC_START); + + /* + * Configure the MAC Arbiter and the Packet Arbiter. + * They will be started once and never be stopped. + */ + SkGeInitMacArb(pAC, IoC); + + SkGeInitPktArb(pAC, IoC); + } + else { + /* Start Time Stamp Timer */ + SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_START); + } /* enable the Tx Arbiters */ - SK_OUT8(IoC, MR_ADDR(MAC_1, TXA_CTRL), TXA_ENA_ARB); - if (pAC->GIni.GIMacsFound > 1) { - SK_OUT8(IoC, MR_ADDR(MAC_2, TXA_CTRL), TXA_ENA_ARB); + for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + SK_OUT8(IoC, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB); } /* enable the RAM Interface Arbiter */ SkGeInitRamIface(pAC, IoC); - for (i = 0; i < SK_MAX_MACS; i++) { - pPrt = &pAC->GIni.GP[i]; - if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { - pPrt->PRxCmd |= XM_RX_BIG_PK_OK; - } - - if (pPrt->PLinkModeConf == SK_LMODE_HALF) { - /* - * If in manual half duplex mode - * the other side might be in full duplex mode - * so ignore if a carrier extension is not seen on - * frames received - */ - pPrt->PRxCmd |= XM_RX_DIS_CEXT; - } - - } } /* SkGeInit2 */ /****************************************************************************** @@ -1758,9 +2067,8 @@ * * Description: * Level 0: Initialize the Module structures. - * Level 1: Generic Hardware Initialization. The - * IOP/MemBase pointer has to be set before - * calling this level. + * Level 1: Generic Hardware Initialization. The IOP/MemBase pointer has + * to be set before calling this level. * * o Do a software reset. * o Clear all reset bits. 
@@ -1780,18 +2088,19 @@ * * Returns: * 0: success - * 1: Number of MACs exceeds SK_MAX_MACS ( after level 1) - * 2: Adapter not present or not accessable + * 1: Number of MACs exceeds SK_MAX_MACS (after level 1) + * 2: Adapter not present or not accessible * 3: Illegal initialization level * 4: Initialization Level 1 Call missing * 5: Unexpected PHY type detected + * 6: HW self test failed */ int SkGeInit( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Level) /* initialization level */ { - int RetVal; /* return value */ + int RetVal; /* return value */ SK_U32 DWord; RetVal = 0; @@ -1804,14 +2113,19 @@ SkGeInit0(pAC, IoC); pAC->GIni.GILevel = SK_INIT_DATA; break; + case SK_INIT_IO: /* Initialization Level 1 */ RetVal = SkGeInit1(pAC, IoC); + if (RetVal != 0) { + break; + } - /* Check if the adapter seems to be accessable */ + /* Check if the adapter seems to be accessible */ SK_OUT32(IoC, B2_IRQM_INI, 0x11335577L); SK_IN32(IoC, B2_IRQM_INI, &DWord); - SK_OUT32(IoC, B2_IRQM_INI, 0x00000000L); + SK_OUT32(IoC, B2_IRQM_INI, 0L); + if (DWord != 0x11335577L) { RetVal = 2; break; @@ -1826,12 +2140,13 @@ /* Level 1 successfully passed */ pAC->GIni.GILevel = SK_INIT_IO; break; + case SK_INIT_RUN: /* Initialization Level 2 */ if (pAC->GIni.GILevel != SK_INIT_IO) { -#ifndef SK_DIAG +#ifndef SK_DIAG SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E002, SKERR_HWI_E002MSG); -#endif +#endif /* !SK_DIAG */ RetVal = 4; break; } @@ -1840,15 +2155,15 @@ /* Level 2 successfully passed */ pAC->GIni.GILevel = SK_INIT_RUN; break; + default: - /* Create an Error Log Entry */ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E003, SKERR_HWI_E003MSG); RetVal = 3; break; } - return (RetVal); -} /* SkGeInit*/ + return(RetVal); +} /* SkGeInit */ /****************************************************************************** @@ -1862,15 +2177,17 @@ * Returns: * nothing */ -void SkGeDeInit( +void SkGeDeInit( SK_AC *pAC, /* adapter context */ SK_IOC IoC) /* IO context */ { int i; SK_U16 Word; - /* Ensure I2C is ready. */ +#ifndef VCPU + /* Ensure I2C is ready */ SkI2cWaitIrq(pAC, IoC); +#endif /* Stop all current transfer activity */ for (i = 0; i < pAC->GIni.GIMacsFound; i++) { @@ -1883,39 +2200,39 @@ /* Reset all bits in the PCI STATUS register */ /* - * Note: Cfg cycles cannot be used, because they are not + * Note: PCI Cfg cycles cannot be used, because they are not * available on some platforms after 'boot time'. */ - SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); + + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); SK_OUT16(IoC, PCI_C(PCI_STATUS), Word | PCI_ERRBITS); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); /* Do the reset, all LEDs are switched off now */ SK_OUT8(IoC, B0_CTST, CS_RST_SET); -} /* SkGeDeInit*/ +} /* SkGeDeInit */ /****************************************************************************** * - * SkGeInitPort() Initialize the specified prot. + * SkGeInitPort() Initialize the specified port. * * Description: * PRxQSize, PXSQSize, and PXAQSize has to be - * configured for the specified port before calling this - * function. The descriptor rings has to be initialized, too. + * configured for the specified port before calling this function. + * The descriptor rings has to be initialized too. * * o (Re)configure queues of the specified port. - * o configure the XMAC of the specified port. - * o put ASIC and XMAC(s) in operational mode. + * o configure the MAC of the specified port. + * o put ASIC and MAC(s) in operational mode. 
* o initialize Rx/Tx and Sync LED * o initialize RAM Buffers and MAC FIFOs * * The port is ready to connect when returning. * * Note: - * The XMACs Rx and Tx state machine is still disabled when - * returning. + * The MAC's Rx and Tx state machine is still disabled when returning. * * Returns: * 0: success @@ -1936,45 +2253,48 @@ if (SkGeCheckQSize(pAC, Port) != 0) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E004, SKERR_HWI_E004MSG); - return (1); + return(1); } + if (pPrt->PState == SK_PRT_INIT || pPrt->PState == SK_PRT_RUN) { SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E005, SKERR_HWI_E005MSG); - return (2); + return(2); } /* Configuration ok, initialize the Port now */ - /* Initialize Rx, Tx and Link LED */ - /* - * If 1000BT Phy needs LED initialization than swap - * LED and XMAC initialization order - */ - SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA); - SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_ENA); - /* The Link LED is initialized by RLMT or Diagnostics itself */ + if (pAC->GIni.GIGenesis) { + /* Initialize Rx, Tx and Link LED */ + /* + * If 1000BT Phy needs LED initialization than swap + * LED and XMAC initialization order + */ + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA); + SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_ENA); + /* The Link LED is initialized by RLMT or Diagnostics itself */ + + SkXmInitMac(pAC, IoC, Port); + } + else { + SkGmInitMac(pAC, IoC, Port); + } + /* Do NOT initialize the Link Sync Counter */ - /* - * Configure - * - XMAC - * - MAC FIFOs - * - RAM Buffers - * - enable Force Sync bit if synchronous queue available - * - BMUs - */ - SkXmInitMac(pAC, IoC, Port); SkGeInitMacFifo(pAC, IoC, Port); + SkGeInitRamBufs(pAC, IoC, Port); + if (pPrt->PXSQSize != 0) { + /* enable Force Sync bit if synchronous queue available */ SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_ENA_FSYNC); } + SkGeInitBmu(pAC, IoC, Port); - /* Mark port as initialized. */ + /* Mark port as initialized */ pPrt->PState = SK_PRT_INIT; - pAC->GIni.GIAnyPortAct = SK_TRUE; - return (0); + return(0); } /* SkGeInitPort */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skgemib.c linux.21pre4-ac2/drivers/net/sk98lin/skgemib.c --- linux.21pre4/drivers/net/sk98lin/skgemib.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/net/sk98lin/skgemib.c 2003-01-06 15:38:18.000000000 +0000 @@ -0,0 +1,1056 @@ +/***************************************************************************** + * + * Name: skgemib.c + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.7 $ + * Date: $Date: 2002/12/16 09:04:34 $ + * Purpose: Private Network Management Interface Management Database + * + ****************************************************************************/ + +/****************************************************************************** + * + * (C)Copyright 2002 SysKonnect GmbH. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * The information in this file is provided "AS IS" without warranty. 
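/*
 * Editorial sketch (not part of the patch): the call order described by
 * the SkGeInit() and SkGeInitPort() comments above, as a driver would
 * typically issue it.  Error handling is reduced to a minimum and the
 * surrounding driver context (pAC, IoC, queue-size and descriptor-ring
 * setup) is assumed to exist; this illustrates the documented init levels
 * and is not code taken from the patch.
 */
static int SketchBringUpPort(
SK_AC *pAC,     /* adapter context */
SK_IOC IoC,     /* IO context */
int Port)       /* port to initialize */
{
        if (SkGeInit(pAC, IoC, SK_INIT_DATA) != 0) {    /* level 0: structures */
                return 1;
        }
        if (SkGeInit(pAC, IoC, SK_INIT_IO) != 0) {      /* level 1: generic HW init */
                return 1;
        }
        /*
         * PRxQSize, PXSQSize and PXAQSize as well as the descriptor rings
         * must be configured here, before the port is initialized.
         */
        if (SkGeInit(pAC, IoC, SK_INIT_RUN) != 0) {     /* level 2: arbiters, timers */
                return 1;
        }
        return SkGeInitPort(pAC, IoC, Port);            /* 0 on success */
}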
+ * + ******************************************************************************/ + +/***************************************************************************** + * + * History: + * + * $Log: skgemib.c,v $ + * Revision 1.7 2002/12/16 09:04:34 tschilli + * Code for VCT handling added. + * + * Revision 1.6 2002/08/09 15:40:21 rwahl + * Editorial change (renamed ConfSpeedCap). + * + * Revision 1.5 2002/08/09 11:05:34 rwahl + * Added oid handling for link speed cap. + * + * Revision 1.4 2002/08/09 09:40:27 rwahl + * Added support for NDIS OID_PNP_xxx. + * + * Revision 1.3 2002/07/17 19:39:54 rwahl + * Added handler for OID_SKGE_SPEED_MODE & OID_SKGE_SPEED_STATUS. + * + * Revision 1.2 2002/05/22 08:59:00 rwahl + * - static functions only for release build. + * - Source file must be included. + * + * Revision 1.1 2002/05/22 08:12:42 rwahl + * Initial version. + * + ****************************************************************************/ + +/* + * PRIVATE OID handler function prototypes + */ +PNMI_STATIC int Addr(SK_AC *pAC, SK_IOC IoC, int action, + SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int CsumStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int General(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Mac8023Stat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int MacPrivateConf(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int MacPrivateStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Monitor(SK_AC *pAC, SK_IOC IoC, int action, + SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int OidStruct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Perform(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int* pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Rlmt(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int RlmtStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int SensorStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Vpd(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int TableIndex, SK_U32 NetIndex); + +#ifdef SK_POWER_MGMT +PNMI_STATIC int PowerManagement(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, + char *pBuf, unsigned int *pLen, SK_U32 Instance, + unsigned int 
TableIndex, SK_U32 NetIndex); +#endif + + + +/* defines *******************************************************************/ +#define ID_TABLE_SIZE (sizeof(IdTable)/sizeof(IdTable[0])) + + +/* global variables **********************************************************/ + +/* + * Table to correlate OID with handler function and index to + * hardware register stored in StatAddress if applicable. + */ +PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = { + {OID_GEN_XMIT_OK, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX}, + {OID_GEN_RCV_OK, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX}, + {OID_GEN_XMIT_ERROR, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_GEN_RCV_ERROR, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_GEN_RCV_NO_BUFFER, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_GEN_DIRECTED_FRAMES_XMIT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNICAST}, + {OID_GEN_MULTICAST_FRAMES_XMIT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTICAST}, + {OID_GEN_BROADCAST_FRAMES_XMIT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_BROADCAST}, + {OID_GEN_DIRECTED_FRAMES_RCV, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_UNICAST}, + {OID_GEN_MULTICAST_FRAMES_RCV, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_MULTICAST}, + {OID_GEN_BROADCAST_FRAMES_RCV, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_BROADCAST}, + {OID_GEN_RCV_CRC_ERROR, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FCS}, + {OID_GEN_TRANSMIT_QUEUE_LENGTH, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_802_3_PERMANENT_ADDRESS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, 0}, + {OID_802_3_CURRENT_ADDRESS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, 0}, + {OID_802_3_RCV_ERROR_ALIGNMENT, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FRAMING}, + {OID_802_3_XMIT_ONE_COLLISION, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_SINGLE_COL}, + {OID_802_3_XMIT_MORE_COLLISIONS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTI_COL}, + {OID_802_3_XMIT_DEFERRED, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_DEFFERAL}, + {OID_802_3_XMIT_MAX_COLLISIONS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_EXCESS_COL}, + {OID_802_3_RCV_OVERRUN, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_OVERFLOW}, + {OID_802_3_XMIT_UNDERRUN, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNDERRUN}, + {OID_802_3_XMIT_TIMES_CRS_LOST, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_CARRIER}, + {OID_802_3_XMIT_LATE_COLLISIONS, + 0, + 0, + 0, + SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_LATE_COL}, +#ifdef SK_POWER_MGMT + {OID_PNP_CAPABILITIES, + 0, + 0, + 0, + SK_PNMI_RO, PowerManagement, 0}, + {OID_PNP_SET_POWER, + 0, + 0, + 0, + SK_PNMI_WO, PowerManagement, 0}, + {OID_PNP_QUERY_POWER, + 0, + 0, + 0, + SK_PNMI_RO, PowerManagement, 0}, + {OID_PNP_ADD_WAKE_UP_PATTERN, + 0, + 0, + 0, + SK_PNMI_WO, PowerManagement, 0}, + {OID_PNP_REMOVE_WAKE_UP_PATTERN, + 0, + 0, + 0, + SK_PNMI_WO, PowerManagement, 0}, + {OID_PNP_ENABLE_WAKE_UP, + 0, + 0, + 0, + SK_PNMI_RW, PowerManagement, 0}, +#endif /* SK_POWER_MGMT */ + {OID_SKGE_MDB_VERSION, + 1, + 0, + SK_PNMI_MAI_OFF(MgmtDBVersion), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SUPPORTED_LIST, + 0, + 0, + 0, + SK_PNMI_RO, General, 0}, + {OID_SKGE_ALL_DATA, + 0, + 0, + 0, + SK_PNMI_RW, OidStruct, 0}, + {OID_SKGE_VPD_FREE_BYTES, + 1, + 0, + SK_PNMI_MAI_OFF(VpdFreeBytes), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ENTRIES_LIST, + 1, + 0, + SK_PNMI_MAI_OFF(VpdEntriesList), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ENTRIES_NUMBER, 
+ 1, + 0, + SK_PNMI_MAI_OFF(VpdEntriesNumber), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_KEY, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdKey), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_VALUE, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdValue), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ACCESS, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAccess), + SK_PNMI_RO, Vpd, 0}, + {OID_SKGE_VPD_ACTION, + SK_PNMI_VPD_ENTRIES, + sizeof(SK_PNMI_VPD), + SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAction), + SK_PNMI_RW, Vpd, 0}, + {OID_SKGE_PORT_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(PortNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DEVICE_TYPE, + 1, + 0, + SK_PNMI_MAI_OFF(DeviceType), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DRIVER_DESCR, + 1, + 0, + SK_PNMI_MAI_OFF(DriverDescr), + SK_PNMI_RO, General, 0}, + {OID_SKGE_DRIVER_VERSION, + 1, + 0, + SK_PNMI_MAI_OFF(DriverVersion), + SK_PNMI_RO, General, 0}, + {OID_SKGE_HW_DESCR, + 1, + 0, + SK_PNMI_MAI_OFF(HwDescr), + SK_PNMI_RO, General, 0}, + {OID_SKGE_HW_VERSION, + 1, + 0, + SK_PNMI_MAI_OFF(HwVersion), + SK_PNMI_RO, General, 0}, + {OID_SKGE_CHIPSET, + 1, + 0, + SK_PNMI_MAI_OFF(Chipset), + SK_PNMI_RO, General, 0}, + {OID_SKGE_ACTION, + 1, + 0, + SK_PNMI_MAI_OFF(Action), + SK_PNMI_RW, Perform, 0}, + {OID_SKGE_RESULT, + 1, + 0, + SK_PNMI_MAI_OFF(TestResult), + SK_PNMI_RO, General, 0}, + {OID_SKGE_BUS_TYPE, + 1, + 0, + SK_PNMI_MAI_OFF(BusType), + SK_PNMI_RO, General, 0}, + {OID_SKGE_BUS_SPEED, + 1, + 0, + SK_PNMI_MAI_OFF(BusSpeed), + SK_PNMI_RO, General, 0}, + {OID_SKGE_BUS_WIDTH, + 1, + 0, + SK_PNMI_MAI_OFF(BusWidth), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_SW_QUEUE_LEN, + 1, + 0, + SK_PNMI_MAI_OFF(TxSwQueueLen), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_SW_QUEUE_MAX, + 1, + 0, + SK_PNMI_MAI_OFF(TxSwQueueMax), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_RETRY, + 1, + 0, + SK_PNMI_MAI_OFF(TxRetryCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_INTR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxIntrCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_INTR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(TxIntrCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_NO_BUF_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxNoBufCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_NO_BUF_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(TxNoBufCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_USED_DESCR_NO, + 1, + 0, + SK_PNMI_MAI_OFF(TxUsedDescrNo), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_DELIVERED_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxDeliveredCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_OCTETS_DELIV_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxOctetsDeliveredCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RX_HW_ERROR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RxHwErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TX_HW_ERROR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(TxHwErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_IN_ERRORS_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(InErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_OUT_ERROR_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(OutErrorsCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_ERR_RECOVERY_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(ErrRecoveryCts), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SYSUPTIME, + 1, + 0, + SK_PNMI_MAI_OFF(SysUpTime), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SENSOR_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(SensorNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_SENSOR_INDEX, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorIndex), + SK_PNMI_RO, SensorStat, 0}, + 
{OID_SKGE_SENSOR_DESCR, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorDescr), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_TYPE, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorType), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_VALUE, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorValue), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_THRES_LOW, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdLow), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_THRES_UPP, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdHigh), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_THRES_LOW, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdLow), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_THRES_UPP, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdHigh), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_STATUS, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorStatus), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_CTS, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningCts), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_CTS, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorCts), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_WAR_TIME, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningTimestamp), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_SENSOR_ERR_TIME, + SK_PNMI_SENSOR_ENTRIES, + sizeof(SK_PNMI_SENSOR), + SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorTimestamp), + SK_PNMI_RO, SensorStat, 0}, + {OID_SKGE_CHKSM_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(ChecksumNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_CHKSM_RX_OK_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxOkCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_RX_UNABLE_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxUnableCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_RX_ERR_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxErrCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_TX_OK_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxOkCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_CHKSM_TX_UNABLE_CTS, + SKCS_NUM_PROTOCOLS, + sizeof(SK_PNMI_CHECKSUM), + SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxUnableCts), + SK_PNMI_RO, CsumStat, 0}, + {OID_SKGE_STAT_TX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX}, + {OID_SKGE_STAT_TX_OCTETS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOctetsOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_OCTET}, + {OID_SKGE_STAT_TX_BROADCAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBroadcastOkCts), + SK_PNMI_RO, MacPrivateStat, 
SK_PNMI_HTX_BROADCAST}, + {OID_SKGE_STAT_TX_MULTICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMulticastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTICAST}, + {OID_SKGE_STAT_TX_UNICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUnicastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNICAST}, + {OID_SKGE_STAT_TX_LONGFRAMES, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLongFramesCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LONGFRAMES}, + {OID_SKGE_STAT_TX_BURST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBurstCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BURST}, + {OID_SKGE_STAT_TX_PFLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxPauseMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_PMACC}, + {OID_SKGE_STAT_TX_FLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MACC}, + {OID_SKGE_STAT_TX_SINGLE_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSingleCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SINGLE_COL}, + {OID_SKGE_STAT_TX_MULTI_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMultipleCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTI_COL}, + {OID_SKGE_STAT_TX_EXCESS_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_COL}, + {OID_SKGE_STAT_TX_LATE_COL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLateCollisionCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LATE_COL}, + {OID_SKGE_STAT_TX_DEFFERAL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxDeferralCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_DEFFERAL}, + {OID_SKGE_STAT_TX_EXCESS_DEF, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveDeferralCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_DEF}, + {OID_SKGE_STAT_TX_UNDERRUN, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxFifoUnderrunCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNDERRUN}, + {OID_SKGE_STAT_TX_CARRIER, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxCarrierCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_CARRIER}, +/* {OID_SKGE_STAT_TX_UTIL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUtilization), + SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */ + {OID_SKGE_STAT_TX_64, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx64Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_64}, + {OID_SKGE_STAT_TX_127, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx127Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_127}, + {OID_SKGE_STAT_TX_255, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx255Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_255}, + {OID_SKGE_STAT_TX_511, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx511Cts), + SK_PNMI_RO, MacPrivateStat, 
SK_PNMI_HTX_511}, + {OID_SKGE_STAT_TX_1023, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx1023Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_1023}, + {OID_SKGE_STAT_TX_MAX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMaxCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MAX}, + {OID_SKGE_STAT_TX_SYNC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC}, + {OID_SKGE_STAT_TX_SYNC_OCTETS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncOctetsCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC_OCTET}, + {OID_SKGE_STAT_RX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX}, + {OID_SKGE_STAT_RX_OCTETS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOctetsOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OCTET}, + {OID_SKGE_STAT_RX_BROADCAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBroadcastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BROADCAST}, + {OID_SKGE_STAT_RX_MULTICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMulticastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MULTICAST}, + {OID_SKGE_STAT_RX_UNICAST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUnicastOkCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_UNICAST}, + {OID_SKGE_STAT_RX_LONGFRAMES, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxLongFramesCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_LONGFRAMES}, + {OID_SKGE_STAT_RX_PFLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC}, + {OID_SKGE_STAT_RX_FLOWC, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC}, + {OID_SKGE_STAT_RX_PFLOWC_ERR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlErrorCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC_ERR}, + {OID_SKGE_STAT_RX_FLOWC_UNKWN, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlUnknownCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC_UNKWN}, + {OID_SKGE_STAT_RX_BURST, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBurstCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BURST}, + {OID_SKGE_STAT_RX_MISSED, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMissedCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MISSED}, + {OID_SKGE_STAT_RX_FRAMING, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFramingCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FRAMING}, + {OID_SKGE_STAT_RX_OVERFLOW, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFifoOverflowCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OVERFLOW}, + {OID_SKGE_STAT_RX_JABBER, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxJabberCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_JABBER}, + 
{OID_SKGE_STAT_RX_CARRIER, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCarrierCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CARRIER}, + {OID_SKGE_STAT_RX_IR_LENGTH, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxIRLengthCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_IRLENGTH}, + {OID_SKGE_STAT_RX_SYMBOL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxSymbolCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SYMBOL}, + {OID_SKGE_STAT_RX_SHORTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxShortsCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SHORTS}, + {OID_SKGE_STAT_RX_RUNT, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxRuntCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_RUNT}, + {OID_SKGE_STAT_RX_CEXT, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCextCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CEXT}, + {OID_SKGE_STAT_RX_TOO_LONG, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxTooLongCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_TOO_LONG}, + {OID_SKGE_STAT_RX_FCS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFcsCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FCS}, +/* {OID_SKGE_STAT_RX_UTIL, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUtilization), + SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */ + {OID_SKGE_STAT_RX_64, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx64Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_64}, + {OID_SKGE_STAT_RX_127, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx127Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_127}, + {OID_SKGE_STAT_RX_255, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx255Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_255}, + {OID_SKGE_STAT_RX_511, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx511Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_511}, + {OID_SKGE_STAT_RX_1023, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx1023Cts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_1023}, + {OID_SKGE_STAT_RX_MAX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_STAT), + SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMaxCts), + SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MAX}, + {OID_SKGE_PHYS_CUR_ADDR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacCurrentAddr), + SK_PNMI_RW, Addr, 0}, + {OID_SKGE_PHYS_FAC_ADDR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacFactoryAddr), + SK_PNMI_RO, Addr, 0}, + {OID_SKGE_PMD, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPMD), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_CONNECTOR, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfConnector), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_LINK_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_LINK_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + 
SK_PNMI_CNF_OFF(ConfLinkMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_LINK_MODE_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkModeStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_LINK_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_FLOWCTRL_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_FLOWCTRL_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_FLOWCTRL_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_PHY_OPERATION_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_PHY_OPERATION_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_PHY_OPERATION_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_SPEED_CAP, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedCapability), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_SPEED_MODE, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedMode), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_SPEED_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_CONF), + SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedStatus), + SK_PNMI_RO, MacPrivateConf, 0}, + {OID_SKGE_TRAP, + 1, + 0, + SK_PNMI_MAI_OFF(Trap), + SK_PNMI_RO, General, 0}, + {OID_SKGE_TRAP_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(TrapNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RLMT_MODE, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtMode), + SK_PNMI_RW, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtPortNumber), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_ACTIVE, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtPortActive), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_PREFERRED, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtPortPreferred), + SK_PNMI_RW, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_CTS, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeCts), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_TIME, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeTime), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_ESTIM, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeEstimate), + SK_PNMI_RO, Rlmt, 0}, + {OID_SKGE_RLMT_CHANGE_THRES, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtChangeThreshold), + SK_PNMI_RW, Rlmt, 0}, + {OID_SKGE_RLMT_PORT_INDEX, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtIndex), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_STATUS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtStatus), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_TX_HELLO_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxHelloCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_RX_HELLO_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxHelloCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_TX_SP_REQ_CTS, + 
SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxSpHelloReqCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_RX_SP_CTS, + SK_PNMI_MAC_ENTRIES, + sizeof(SK_PNMI_RLMT), + SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxSpHelloCts), + SK_PNMI_RO, RlmtStat, 0}, + {OID_SKGE_RLMT_MONITOR_NUMBER, + 1, + 0, + SK_PNMI_MAI_OFF(RlmtMonitorNumber), + SK_PNMI_RO, General, 0}, + {OID_SKGE_RLMT_MONITOR_INDEX, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorIndex), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_ADDR, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAddr), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_ERRS, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorErrorCts), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_TIMESTAMP, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorTimestamp), + SK_PNMI_RO, Monitor, 0}, + {OID_SKGE_RLMT_MONITOR_ADMIN, + SK_PNMI_MONITOR_ENTRIES, + sizeof(SK_PNMI_RLMT_MONITOR), + SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAdmin), + SK_PNMI_RW, Monitor, 0}, + {OID_SKGE_MTU, + 1, + 0, + SK_PNMI_MAI_OFF(MtuSize), + SK_PNMI_RW, MacPrivateConf, 0}, + {OID_SKGE_VCT_GET, + 0, + 0, + 0, + SK_PNMI_RO, Vct, 0}, + {OID_SKGE_VCT_SET, + 0, + 0, + 0, + SK_PNMI_WO, Vct, 0}, + {OID_SKGE_VCT_STATUS, + 0, + 0, + 0, + SK_PNMI_RO, Vct, 0}, +}; + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skgepnmi.c linux.21pre4-ac2/drivers/net/sk98lin/skgepnmi.c --- linux.21pre4/drivers/net/sk98lin/skgepnmi.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skgepnmi.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skgepnmi.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.87 $ - * Date: $Date: 2001/04/06 13:35:09 $ + * Version: $Revision: 1.102 $ + * Date: $Date: 2002/12/16 14:03:24 $ * Purpose: Private Network Management Interface * ****************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,78 @@ * History: * * $Log: skgepnmi.c,v $ + * Revision 1.102 2002/12/16 14:03:24 tschilli + * VCT code in Vct() changed. + * + * Revision 1.101 2002/12/16 09:04:10 tschilli + * Code for VCT handling added. + * + * Revision 1.100 2002/09/26 14:28:13 tschilli + * For XMAC the values in the SK_PNMI_PORT Port struct are copied to + * the new SK_PNMI_PORT BufPort struct during a MacUpdate() call. + * These values are used when GetPhysStatVal() is called. With this + * mechanism you get the best results when software corrections for + * counters are needed. Example: RX_LONGFRAMES. + * + * Revision 1.99 2002/09/17 12:31:19 tschilli + * OID_SKGE_TX_HW_ERROR_CTS, OID_SKGE_OUT_ERROR_CTS, OID_GEN_XMIT_ERROR: + * Double count of SK_PNMI_HTX_EXCESS_COL in function General() removed. + * OID_PNP_CAPABILITIES: sizeof(SK_PM_WAKE_UP_CAPABILITIES) changed to + * sizeof(SK_PNP_CAPABILITIES) in function PowerManagement(). 
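/*
 * Illustrative sketch of the table-driven dispatch that IdTable (defined in
 * skgemib.c above) enables: each entry pairs an OID with the handler that
 * serves it, and a request is answered by scanning the table for the OID and
 * calling that handler (in the driver this is presumably done by LookupId()
 * and PnmiVar() over SK_PNMI_TAB_ENTRY).  The types, names and the trimmed
 * handler signature below are simplified stand-ins, not the driver's own.
 */
#include <stddef.h>

typedef unsigned long EXAMPLE_U32;		/* stand-in for SK_U32 */
typedef int (*EXAMPLE_OID_HANDLER)(EXAMPLE_U32 Id, char *pBuf,
	unsigned int *pLen);			/* trimmed handler signature */

typedef struct {
	EXAMPLE_U32		Id;		/* OID this entry answers */
	EXAMPLE_OID_HANDLER	Func;		/* handler called for the OID */
} EXAMPLE_TAB_ENTRY;

/* Scan the table for Id and dispatch; return -1 if the OID is unsupported. */
static int ExampleDispatchOid(
const EXAMPLE_TAB_ENTRY *Tab,	/* OID table */
size_t			TabLen,	/* number of entries, cf. ID_TABLE_SIZE */
EXAMPLE_U32		Id,	/* requested OID */
char			*pBuf,	/* caller's buffer */
unsigned int		*pLen)	/* length argument passed to the handler */
{
	size_t i;

	for (i = 0; i < TabLen; i++) {
		if (Tab[i].Id == Id) {
			return Tab[i].Func(Id, pBuf, pLen);
		}
	}
	return -1;
}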
+ * + * Revision 1.98 2002/09/10 09:00:03 rwahl + * Adapted boolean definitions according sktypes. + * + * Revision 1.97 2002/09/05 15:07:03 rwahl + * Editorial changes. + * + * Revision 1.96 2002/09/05 11:04:14 rwahl + * - Rx/Tx packets statistics of virtual port were zero on link down (#10750) + * - For GMAC the overflow IRQ for Rx longframe counter was not counted. + * - Incorrect calculation for oids OID_SKGE_RX_HW_ERROR_CTS, + * OID_SKGE_IN_ERRORS_CTS, OID_GEN_RCV_ERROR. + * - Moved correction for OID_SKGE_STAT_RX_TOO_LONG to GetPhysStatVal(). + * - Editorial changes. + * + * Revision 1.95 2002/09/04 08:53:37 rwahl + * - Incorrect statistics for Rx_too_long counter with jumbo frame (#10751) + * - StatRxFrameTooLong & StatRxPMaccErr counters were not reset. + * - Fixed compiler warning for debug msg arg types. + * + * Revision 1.94 2002/08/09 15:42:14 rwahl + * - Fixed StatAddr table for GMAC. + * - VirtualConf(): returned indeterminated status for speed oids if no + * active port. + * + * Revision 1.93 2002/08/09 11:04:59 rwahl + * Added handler for link speed caps. + * + * Revision 1.92 2002/08/09 09:43:03 rwahl + * - Added handler for NDIS OID_PNP_xxx ids. + * + * Revision 1.91 2002/07/17 19:53:03 rwahl + * - Added StatOvrflwBit table for XMAC & GMAC. + * - Extended StatAddr table for GMAC. Added check of number of counters + * in enumeration and size of StatAddr table on init level. + * - Added use of GIFunc table. + * - ChipSet is not static anymore, + * - Extended SIRQ event handler for both mac types. + * - Fixed rx short counter bug (#10620) + * - Added handler for oids SKGE_SPEED_MODE & SKGE_SPEED_STATUS. + * - Extendet GetPhysStatVal() for GMAC. + * - Editorial changes. + * + * Revision 1.90 2002/05/22 08:56:25 rwahl + * - Moved OID table to separate source file. + * - Fix: TX_DEFFERAL counter incremented in full-duplex mode. + * - Use string definitions for error msgs. + * + * Revision 1.89 2001/09/18 10:01:30 mkunz + * some OID's fixed for dualnetmode + * + * Revision 1.88 2001/08/02 07:58:08 rwahl + * - Fixed NetIndex to csum module at ResetCounter(). 
+ * * Revision 1.87 2001/04/06 13:35:09 mkunz * -Bugs fixed in handling of OID_SKGE_MTU and the VPD OID's * @@ -37,7 +109,6 @@ * * Revision 1.84 2001/03/06 09:04:55 mkunz * Made some changes in instance calculation - * C ^VS: * * Revision 1.83 2001/02/15 09:15:32 mkunz * Necessary changes for dual net mode added @@ -362,13 +433,12 @@ static const char SysKonnectFileId[] = - "@(#) $Id: skgepnmi.c,v 1.87 2001/04/06 13:35:09 mkunz Exp $" + "@(#) $Id: skgepnmi.c,v 1.102 2002/12/16 14:03:24 tschilli Exp $" " (C) SysKonnect."; #include "h/skdrv1st.h" #include "h/sktypes.h" #include "h/xmac_ii.h" - #include "h/skdebug.h" #include "h/skqueue.h" #include "h/skgepnmi.h" @@ -379,7 +449,16 @@ #include "h/skgeinit.h" #include "h/skdrv2nd.h" #include "h/skgepnm2.h" +#ifdef SK_POWER_MGMT +#include "h/skgepmgt.h" +#endif +/* defines *******************************************************************/ +#ifndef DEBUG +#define PNMI_STATIC static +#else /* DEBUG */ +#define PNMI_STATIC +#endif /* DEBUG */ /* * Public Function prototypes @@ -403,1037 +482,259 @@ /* * Private Function prototypes */ -static int Addr(SK_AC *pAC, SK_IOC IoC, int action, - SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static SK_U8 CalculateLinkModeStatus(SK_AC *pAC, SK_IOC IoC, unsigned int + +PNMI_STATIC SK_U8 CalculateLinkModeStatus(SK_AC *pAC, SK_IOC IoC, unsigned int PhysPortIndex); -static SK_U8 CalculateLinkStatus(SK_AC *pAC, SK_IOC IoC, unsigned int +PNMI_STATIC SK_U8 CalculateLinkStatus(SK_AC *pAC, SK_IOC IoC, unsigned int PhysPortIndex); -static void CopyMac(char *pDst, SK_MAC_ADDR *pMac); -static void CopyTrapQueue(SK_AC *pAC, char *pDstBuf); -static int CsumStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int General(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static SK_U64 GetPhysStatVal(SK_AC *pAC, SK_IOC IoC, +PNMI_STATIC void CopyMac(char *pDst, SK_MAC_ADDR *pMac); +PNMI_STATIC void CopyTrapQueue(SK_AC *pAC, char *pDstBuf); +PNMI_STATIC SK_U64 GetPhysStatVal(SK_AC *pAC, SK_IOC IoC, unsigned int PhysPortIndex, unsigned int StatIndex); -static SK_U64 GetStatVal(SK_AC *pAC, SK_IOC IoC, unsigned int LogPortIndex, +PNMI_STATIC SK_U64 GetStatVal(SK_AC *pAC, SK_IOC IoC, unsigned int LogPortIndex, unsigned int StatIndex, SK_U32 NetIndex); -static char* GetTrapEntry(SK_AC *pAC, SK_U32 TrapId, unsigned int Size); -static void GetTrapQueueLen(SK_AC *pAC, unsigned int *pLen, +PNMI_STATIC char* GetTrapEntry(SK_AC *pAC, SK_U32 TrapId, unsigned int Size); +PNMI_STATIC void GetTrapQueueLen(SK_AC *pAC, unsigned int *pLen, unsigned int *pEntries); -static int GetVpdKeyArr(SK_AC *pAC, SK_IOC IoC, char *pKeyArr, +PNMI_STATIC int GetVpdKeyArr(SK_AC *pAC, SK_IOC IoC, char *pKeyArr, unsigned int KeyArrLen, unsigned int *pKeyNo); -static int LookupId(SK_U32 Id); -static int Mac8023Stat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int MacPrivateConf(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int MacPrivateStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int 
MacUpdate(SK_AC *pAC, SK_IOC IoC, unsigned int FirstMac, +PNMI_STATIC int LookupId(SK_U32 Id); +PNMI_STATIC int MacUpdate(SK_AC *pAC, SK_IOC IoC, unsigned int FirstMac, unsigned int LastMac); -static int Monitor(SK_AC *pAC, SK_IOC IoC, int action, - SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int OidStruct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int Perform(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int* pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int PnmiStruct(SK_AC *pAC, SK_IOC IoC, int Action, char *pBuf, +PNMI_STATIC int PnmiStruct(SK_AC *pAC, SK_IOC IoC, int Action, char *pBuf, unsigned int *pLen, SK_U32 NetIndex); -static int PnmiVar(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, +PNMI_STATIC int PnmiVar(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex); -static void QueueRlmtNewMacTrap(SK_AC *pAC, unsigned int ActiveMac); -static void QueueRlmtPortTrap(SK_AC *pAC, SK_U32 TrapId, +PNMI_STATIC void QueueRlmtNewMacTrap(SK_AC *pAC, unsigned int ActiveMac); +PNMI_STATIC void QueueRlmtPortTrap(SK_AC *pAC, SK_U32 TrapId, unsigned int PortIndex); -static void QueueSensorTrap(SK_AC *pAC, SK_U32 TrapId, +PNMI_STATIC void QueueSensorTrap(SK_AC *pAC, SK_U32 TrapId, unsigned int SensorIndex); -static void QueueSimpleTrap(SK_AC *pAC, SK_U32 TrapId); -static void ResetCounter(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex); -static int Rlmt(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int RlmtStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int RlmtUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex); -static int SensorStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); -static int SirqUpdate(SK_AC *pAC, SK_IOC IoC); -static void VirtualConf(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, char *pBuf); -static int Vpd(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id, - char *pBuf, unsigned int *pLen, SK_U32 Instance, - unsigned int TableIndex, SK_U32 NetIndex); - - -/****************************************************************************** - * - * Global variables - */ +PNMI_STATIC void QueueSimpleTrap(SK_AC *pAC, SK_U32 TrapId); +PNMI_STATIC void ResetCounter(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex); +PNMI_STATIC int RlmtUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex); +PNMI_STATIC int SirqUpdate(SK_AC *pAC, SK_IOC IoC); +PNMI_STATIC void VirtualConf(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, char *pBuf); +PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, char *pBuf, + unsigned int *pLen, SK_U32 Instance, unsigned int TableIndex, SK_U32 NetIndex); +PNMI_STATIC void CheckVctStatus(SK_AC *, SK_IOC, char *, SK_U32, SK_U32); /* * Table to correlate OID with handler function and index to * hardware register stored in StatAddress if applicable. 
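/*
 * Illustrative note on the PNMI_STATIC convention introduced above: the
 * private handlers get internal ("static") linkage only in release builds,
 * while DEBUG builds leave them externally visible, presumably so that a
 * debugger or diagnostic build can reach them by name.  EXAMPLE_PNMI_STATIC
 * and ExampleHandler are hypothetical names used only for this sketch.
 */
#ifndef DEBUG
#define EXAMPLE_PNMI_STATIC static	/* file-local in release builds */
#else	/* DEBUG */
#define EXAMPLE_PNMI_STATIC		/* externally visible for debugging */
#endif	/* DEBUG */

EXAMPLE_PNMI_STATIC int ExampleHandler(int Action)
{
	/* trivial body; real handlers follow the prototype pattern above */
	return (Action != 0) ? 0 : 1;
}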
*/ -static const SK_PNMI_TAB_ENTRY IdTable[] = { - {OID_GEN_XMIT_OK, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX}, - {OID_GEN_RCV_OK, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX}, - {OID_GEN_XMIT_ERROR, - 0, - 0, - 0, - SK_PNMI_RO, General, 0}, - {OID_GEN_RCV_ERROR, - 0, - 0, - 0, - SK_PNMI_RO, General, 0}, - {OID_GEN_RCV_NO_BUFFER, - 0, - 0, - 0, - SK_PNMI_RO, General, 0}, - {OID_GEN_DIRECTED_FRAMES_XMIT, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNICAST}, - {OID_GEN_MULTICAST_FRAMES_XMIT, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTICAST}, - {OID_GEN_BROADCAST_FRAMES_XMIT, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_BROADCAST}, - {OID_GEN_DIRECTED_FRAMES_RCV, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_UNICAST}, - {OID_GEN_MULTICAST_FRAMES_RCV, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_MULTICAST}, - {OID_GEN_BROADCAST_FRAMES_RCV, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_BROADCAST}, - {OID_GEN_RCV_CRC_ERROR, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FCS}, - {OID_GEN_TRANSMIT_QUEUE_LENGTH, - 0, - 0, - 0, - SK_PNMI_RO, General, 0}, - {OID_802_3_PERMANENT_ADDRESS, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, 0}, - {OID_802_3_CURRENT_ADDRESS, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, 0}, - {OID_802_3_RCV_ERROR_ALIGNMENT, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FRAMING}, - {OID_802_3_XMIT_ONE_COLLISION, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_SINGLE_COL}, - {OID_802_3_XMIT_MORE_COLLISIONS, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTI_COL}, - {OID_802_3_XMIT_DEFERRED, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_DEFFERAL}, - {OID_802_3_XMIT_MAX_COLLISIONS, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_EXCESS_COL}, - {OID_802_3_RCV_OVERRUN, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_OVERFLOW}, - {OID_802_3_XMIT_UNDERRUN, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNDERRUN}, - {OID_802_3_XMIT_TIMES_CRS_LOST, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_CARRIER}, - {OID_802_3_XMIT_LATE_COLLISIONS, - 0, - 0, - 0, - SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_LATE_COL}, - {OID_SKGE_MDB_VERSION, - 1, - 0, - SK_PNMI_MAI_OFF(MgmtDBVersion), - SK_PNMI_RO, General, 0}, - {OID_SKGE_SUPPORTED_LIST, - 0, - 0, - 0, - SK_PNMI_RO, General, 0}, - {OID_SKGE_ALL_DATA, - 0, - 0, - 0, - SK_PNMI_RW, OidStruct, 0}, - {OID_SKGE_VPD_FREE_BYTES, - 1, - 0, - SK_PNMI_MAI_OFF(VpdFreeBytes), - SK_PNMI_RO, Vpd, 0}, - {OID_SKGE_VPD_ENTRIES_LIST, - 1, - 0, - SK_PNMI_MAI_OFF(VpdEntriesList), - SK_PNMI_RO, Vpd, 0}, - {OID_SKGE_VPD_ENTRIES_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(VpdEntriesNumber), - SK_PNMI_RO, Vpd, 0}, - {OID_SKGE_VPD_KEY, - SK_PNMI_VPD_ENTRIES, - sizeof(SK_PNMI_VPD), - SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdKey), - SK_PNMI_RO, Vpd, 0}, - {OID_SKGE_VPD_VALUE, - SK_PNMI_VPD_ENTRIES, - sizeof(SK_PNMI_VPD), - SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdValue), - SK_PNMI_RO, Vpd, 0}, - {OID_SKGE_VPD_ACCESS, - SK_PNMI_VPD_ENTRIES, - sizeof(SK_PNMI_VPD), - SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAccess), - SK_PNMI_RO, Vpd, 0}, - {OID_SKGE_VPD_ACTION, - SK_PNMI_VPD_ENTRIES, - sizeof(SK_PNMI_VPD), - SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAction), - SK_PNMI_RW, Vpd, 0}, - {OID_SKGE_PORT_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(PortNumber), - SK_PNMI_RO, General, 0}, - {OID_SKGE_DEVICE_TYPE, - 1, - 0, - SK_PNMI_MAI_OFF(DeviceType), - SK_PNMI_RO, General, 0}, - {OID_SKGE_DRIVER_DESCR, - 1, - 0, - SK_PNMI_MAI_OFF(DriverDescr), - SK_PNMI_RO, General, 0}, - 
{OID_SKGE_DRIVER_VERSION, - 1, - 0, - SK_PNMI_MAI_OFF(DriverVersion), - SK_PNMI_RO, General, 0}, - {OID_SKGE_HW_DESCR, - 1, - 0, - SK_PNMI_MAI_OFF(HwDescr), - SK_PNMI_RO, General, 0}, - {OID_SKGE_HW_VERSION, - 1, - 0, - SK_PNMI_MAI_OFF(HwVersion), - SK_PNMI_RO, General, 0}, - {OID_SKGE_CHIPSET, - 1, - 0, - SK_PNMI_MAI_OFF(Chipset), - SK_PNMI_RO, General, 0}, - {OID_SKGE_ACTION, - 1, - 0, - SK_PNMI_MAI_OFF(Action), - SK_PNMI_RW, Perform, 0}, - {OID_SKGE_RESULT, - 1, - 0, - SK_PNMI_MAI_OFF(TestResult), - SK_PNMI_RO, General, 0}, - {OID_SKGE_BUS_TYPE, - 1, - 0, - SK_PNMI_MAI_OFF(BusType), - SK_PNMI_RO, General, 0}, - {OID_SKGE_BUS_SPEED, - 1, - 0, - SK_PNMI_MAI_OFF(BusSpeed), - SK_PNMI_RO, General, 0}, - {OID_SKGE_BUS_WIDTH, - 1, - 0, - SK_PNMI_MAI_OFF(BusWidth), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_SW_QUEUE_LEN, - 1, - 0, - SK_PNMI_MAI_OFF(TxSwQueueLen), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_SW_QUEUE_MAX, - 1, - 0, - SK_PNMI_MAI_OFF(TxSwQueueMax), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_RETRY, - 1, - 0, - SK_PNMI_MAI_OFF(TxRetryCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RX_INTR_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(RxIntrCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_INTR_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(TxIntrCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RX_NO_BUF_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(RxNoBufCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_NO_BUF_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(TxNoBufCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_USED_DESCR_NO, - 1, - 0, - SK_PNMI_MAI_OFF(TxUsedDescrNo), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RX_DELIVERED_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(RxDeliveredCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RX_OCTETS_DELIV_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(RxOctetsDeliveredCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RX_HW_ERROR_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(RxHwErrorsCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TX_HW_ERROR_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(TxHwErrorsCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_IN_ERRORS_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(InErrorsCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_OUT_ERROR_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(OutErrorsCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_ERR_RECOVERY_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(ErrRecoveryCts), - SK_PNMI_RO, General, 0}, - {OID_SKGE_SYSUPTIME, - 1, - 0, - SK_PNMI_MAI_OFF(SysUpTime), - SK_PNMI_RO, General, 0}, - {OID_SKGE_SENSOR_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(SensorNumber), - SK_PNMI_RO, General, 0}, - {OID_SKGE_SENSOR_INDEX, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorIndex), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_DESCR, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorDescr), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_TYPE, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorType), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_VALUE, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorValue), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_WAR_THRES_LOW, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdLow), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_WAR_THRES_UPP, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdHigh), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_ERR_THRES_LOW, - SK_PNMI_SENSOR_ENTRIES, - 
sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdLow), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_ERR_THRES_UPP, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdHigh), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_STATUS, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorStatus), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_WAR_CTS, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningCts), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_ERR_CTS, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorCts), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_WAR_TIME, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningTimestamp), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_SENSOR_ERR_TIME, - SK_PNMI_SENSOR_ENTRIES, - sizeof(SK_PNMI_SENSOR), - SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorTimestamp), - SK_PNMI_RO, SensorStat, 0}, - {OID_SKGE_CHKSM_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(ChecksumNumber), - SK_PNMI_RO, General, 0}, - {OID_SKGE_CHKSM_RX_OK_CTS, - SKCS_NUM_PROTOCOLS, - sizeof(SK_PNMI_CHECKSUM), - SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxOkCts), - SK_PNMI_RO, CsumStat, 0}, - {OID_SKGE_CHKSM_RX_UNABLE_CTS, - SKCS_NUM_PROTOCOLS, - sizeof(SK_PNMI_CHECKSUM), - SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxUnableCts), - SK_PNMI_RO, CsumStat, 0}, - {OID_SKGE_CHKSM_RX_ERR_CTS, - SKCS_NUM_PROTOCOLS, - sizeof(SK_PNMI_CHECKSUM), - SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxErrCts), - SK_PNMI_RO, CsumStat, 0}, - {OID_SKGE_CHKSM_TX_OK_CTS, - SKCS_NUM_PROTOCOLS, - sizeof(SK_PNMI_CHECKSUM), - SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxOkCts), - SK_PNMI_RO, CsumStat, 0}, - {OID_SKGE_CHKSM_TX_UNABLE_CTS, - SKCS_NUM_PROTOCOLS, - sizeof(SK_PNMI_CHECKSUM), - SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxUnableCts), - SK_PNMI_RO, CsumStat, 0}, - {OID_SKGE_STAT_TX, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX}, - {OID_SKGE_STAT_TX_OCTETS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOctetsOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_OCTET}, - {OID_SKGE_STAT_TX_BROADCAST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBroadcastOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BROADCAST}, - {OID_SKGE_STAT_TX_MULTICAST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMulticastOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTICAST}, - {OID_SKGE_STAT_TX_UNICAST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUnicastOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNICAST}, - {OID_SKGE_STAT_TX_LONGFRAMES, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLongFramesCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LONGFRAMES}, - {OID_SKGE_STAT_TX_BURST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBurstCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BURST}, - {OID_SKGE_STAT_TX_PFLOWC, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxPauseMacCtrlCts), - SK_PNMI_RO, 
MacPrivateStat, SK_PNMI_HTX_PMACC}, - {OID_SKGE_STAT_TX_FLOWC, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMacCtrlCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MACC}, - {OID_SKGE_STAT_TX_SINGLE_COL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSingleCollisionCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SINGLE_COL}, - {OID_SKGE_STAT_TX_MULTI_COL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMultipleCollisionCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTI_COL}, - {OID_SKGE_STAT_TX_EXCESS_COL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveCollisionCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_COL}, - {OID_SKGE_STAT_TX_LATE_COL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLateCollisionCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LATE_COL}, - {OID_SKGE_STAT_TX_DEFFERAL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxDeferralCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_DEFFERAL}, - {OID_SKGE_STAT_TX_EXCESS_DEF, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveDeferralCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_DEF}, - {OID_SKGE_STAT_TX_UNDERRUN, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxFifoUnderrunCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNDERRUN}, - {OID_SKGE_STAT_TX_CARRIER, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxCarrierCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_CARRIER}, -/* {OID_SKGE_STAT_TX_UTIL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUtilization), - SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */ - {OID_SKGE_STAT_TX_64, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx64Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_64}, - {OID_SKGE_STAT_TX_127, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx127Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_127}, - {OID_SKGE_STAT_TX_255, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx255Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_255}, - {OID_SKGE_STAT_TX_511, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx511Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_511}, - {OID_SKGE_STAT_TX_1023, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx1023Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_1023}, - {OID_SKGE_STAT_TX_MAX, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMaxCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MAX}, - {OID_SKGE_STAT_TX_SYNC, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC}, - {OID_SKGE_STAT_TX_SYNC_OCTETS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncOctetsCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC_OCTET}, - {OID_SKGE_STAT_RX, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX}, - 
{OID_SKGE_STAT_RX_OCTETS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOctetsOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OCTET}, - {OID_SKGE_STAT_RX_BROADCAST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBroadcastOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BROADCAST}, - {OID_SKGE_STAT_RX_MULTICAST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMulticastOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MULTICAST}, - {OID_SKGE_STAT_RX_UNICAST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUnicastOkCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_UNICAST}, - {OID_SKGE_STAT_RX_LONGFRAMES, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxLongFramesCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_LONGFRAMES}, - {OID_SKGE_STAT_RX_PFLOWC, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC}, - {OID_SKGE_STAT_RX_FLOWC, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC}, - {OID_SKGE_STAT_RX_PFLOWC_ERR, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlErrorCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC_ERR}, - {OID_SKGE_STAT_RX_FLOWC_UNKWN, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlUnknownCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC_UNKWN}, - {OID_SKGE_STAT_RX_BURST, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBurstCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BURST}, - {OID_SKGE_STAT_RX_MISSED, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMissedCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MISSED}, - {OID_SKGE_STAT_RX_FRAMING, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFramingCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FRAMING}, - {OID_SKGE_STAT_RX_OVERFLOW, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFifoOverflowCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OVERFLOW}, - {OID_SKGE_STAT_RX_JABBER, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxJabberCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_JABBER}, - {OID_SKGE_STAT_RX_CARRIER, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCarrierCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CARRIER}, - {OID_SKGE_STAT_RX_IR_LENGTH, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxIRLengthCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_IRLENGTH}, - {OID_SKGE_STAT_RX_SYMBOL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxSymbolCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SYMBOL}, - {OID_SKGE_STAT_RX_SHORTS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxShortsCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SHORTS}, - {OID_SKGE_STAT_RX_RUNT, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxRuntCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_RUNT}, - 
{OID_SKGE_STAT_RX_CEXT, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCextCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CEXT}, - {OID_SKGE_STAT_RX_TOO_LONG, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxTooLongCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_TOO_LONG}, - {OID_SKGE_STAT_RX_FCS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFcsCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FCS}, -/* {OID_SKGE_STAT_RX_UTIL, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUtilization), - SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */ - {OID_SKGE_STAT_RX_64, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx64Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_64}, - {OID_SKGE_STAT_RX_127, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx127Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_127}, - {OID_SKGE_STAT_RX_255, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx255Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_255}, - {OID_SKGE_STAT_RX_511, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx511Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_511}, - {OID_SKGE_STAT_RX_1023, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx1023Cts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_1023}, - {OID_SKGE_STAT_RX_MAX, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_STAT), - SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMaxCts), - SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MAX}, - {OID_SKGE_PHYS_CUR_ADDR, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacCurrentAddr), - SK_PNMI_RW, Addr, 0}, - {OID_SKGE_PHYS_FAC_ADDR, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacFactoryAddr), - SK_PNMI_RO, Addr, 0}, - {OID_SKGE_PMD, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPMD), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_CONNECTOR, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfConnector), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_LINK_CAP, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkCapability), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_LINK_MODE, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkMode), - SK_PNMI_RW, MacPrivateConf, 0}, - {OID_SKGE_LINK_MODE_STATUS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkModeStatus), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_LINK_STATUS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkStatus), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_FLOWCTRL_CAP, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlCapability), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_FLOWCTRL_MODE, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlMode), - SK_PNMI_RW, MacPrivateConf, 0}, - {OID_SKGE_FLOWCTRL_STATUS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlStatus), - SK_PNMI_RO, MacPrivateConf, 0}, - 
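The configuration entries removed here are indexed by logical port, where logical port 0 is the virtual port made up of the currently active physical ports, as the MacPrivateConf() hunks later in this patch show. A toy illustration of that numbering, assuming a simple minus-one mapping for a two-port board; the driver's SK_PNMI_PORT_LOG2PHYS() macro is the authoritative mapping and additionally has to account for dual-net mode.

/*
 * Illustrative logical/physical port numbering: logical 0 is the virtual
 * port, logical 1..n map to physical 0..n-1.  The mapping function here
 * is a simplification for reading purposes only.
 */
#include <stdio.h>

#define PHYS_PORT_MAX 2                       /* two-port adapter */
#define LOG_PORT_MAX  (PHYS_PORT_MAX + 1)     /* virtual port + physical ports */

/* Logical index 0 is the virtual port; anything else maps to index - 1. */
static int LogToPhys(int LogPortIndex)
{
	return (LogPortIndex == 0) ? -1 : LogPortIndex - 1;
}

int main(void)
{
	int Log;

	for (Log = 0; Log < LOG_PORT_MAX; Log++) {
		int Phys = LogToPhys(Log);

		if (Phys < 0)
			printf("logical %d -> virtual port (all active ports)\n", Log);
		else
			printf("logical %d -> physical %d\n", Log, Phys);
	}
	return 0;
}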
{OID_SKGE_PHY_OPERATION_CAP, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationCapability), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_PHY_OPERATION_MODE, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationMode), - SK_PNMI_RW, MacPrivateConf, 0}, - {OID_SKGE_PHY_OPERATION_STATUS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_CONF), - SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationStatus), - SK_PNMI_RO, MacPrivateConf, 0}, - {OID_SKGE_TRAP, - 1, - 0, - SK_PNMI_MAI_OFF(Trap), - SK_PNMI_RO, General, 0}, - {OID_SKGE_TRAP_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(TrapNumber), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RLMT_MODE, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtMode), - SK_PNMI_RW, Rlmt, 0}, - {OID_SKGE_RLMT_PORT_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtPortNumber), - SK_PNMI_RO, Rlmt, 0}, - {OID_SKGE_RLMT_PORT_ACTIVE, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtPortActive), - SK_PNMI_RO, Rlmt, 0}, - {OID_SKGE_RLMT_PORT_PREFERRED, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtPortPreferred), - SK_PNMI_RW, Rlmt, 0}, - {OID_SKGE_RLMT_CHANGE_CTS, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtChangeCts), - SK_PNMI_RO, Rlmt, 0}, - {OID_SKGE_RLMT_CHANGE_TIME, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtChangeTime), - SK_PNMI_RO, Rlmt, 0}, - {OID_SKGE_RLMT_CHANGE_ESTIM, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtChangeEstimate), - SK_PNMI_RO, Rlmt, 0}, - {OID_SKGE_RLMT_CHANGE_THRES, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtChangeThreshold), - SK_PNMI_RW, Rlmt, 0}, - {OID_SKGE_RLMT_PORT_INDEX, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_RLMT), - SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtIndex), - SK_PNMI_RO, RlmtStat, 0}, - {OID_SKGE_RLMT_STATUS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_RLMT), - SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtStatus), - SK_PNMI_RO, RlmtStat, 0}, - {OID_SKGE_RLMT_TX_HELLO_CTS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_RLMT), - SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxHelloCts), - SK_PNMI_RO, RlmtStat, 0}, - {OID_SKGE_RLMT_RX_HELLO_CTS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_RLMT), - SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxHelloCts), - SK_PNMI_RO, RlmtStat, 0}, - {OID_SKGE_RLMT_TX_SP_REQ_CTS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_RLMT), - SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxSpHelloReqCts), - SK_PNMI_RO, RlmtStat, 0}, - {OID_SKGE_RLMT_RX_SP_CTS, - SK_PNMI_MAC_ENTRIES, - sizeof(SK_PNMI_RLMT), - SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxSpHelloCts), - SK_PNMI_RO, RlmtStat, 0}, - {OID_SKGE_RLMT_MONITOR_NUMBER, - 1, - 0, - SK_PNMI_MAI_OFF(RlmtMonitorNumber), - SK_PNMI_RO, General, 0}, - {OID_SKGE_RLMT_MONITOR_INDEX, - SK_PNMI_MONITOR_ENTRIES, - sizeof(SK_PNMI_RLMT_MONITOR), - SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorIndex), - SK_PNMI_RO, Monitor, 0}, - {OID_SKGE_RLMT_MONITOR_ADDR, - SK_PNMI_MONITOR_ENTRIES, - sizeof(SK_PNMI_RLMT_MONITOR), - SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAddr), - SK_PNMI_RO, Monitor, 0}, - {OID_SKGE_RLMT_MONITOR_ERRS, - SK_PNMI_MONITOR_ENTRIES, - sizeof(SK_PNMI_RLMT_MONITOR), - SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorErrorCts), - SK_PNMI_RO, Monitor, 0}, - {OID_SKGE_RLMT_MONITOR_TIMESTAMP, - SK_PNMI_MONITOR_ENTRIES, - sizeof(SK_PNMI_RLMT_MONITOR), - SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorTimestamp), - SK_PNMI_RO, Monitor, 0}, - {OID_SKGE_RLMT_MONITOR_ADMIN, - SK_PNMI_MONITOR_ENTRIES, - sizeof(SK_PNMI_RLMT_MONITOR), - SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAdmin), - SK_PNMI_RW, Monitor, 0}, - {OID_SKGE_MTU, - 1, - 0, - SK_PNMI_MAI_OFF(MtuSize), - SK_PNMI_RW, 
MacPrivateConf, 0}, +#include "skgemib.c" + +/* global variables **********************************************************/ + +/* + * Overflow status register bit table and corresponding counter + * dependent on MAC type - the number relates to the size of overflow + * mask returned by the pFnMacOverflow function + */ +PNMI_STATIC const SK_U16 StatOvrflwBit[][SK_PNMI_MAC_TYPES] = { +/* Bit0 */ { SK_PNMI_HTX, SK_PNMI_HTX_UNICAST}, +/* Bit1 */ { SK_PNMI_HTX_OCTETHIGH, SK_PNMI_HTX_BROADCAST}, +/* Bit2 */ { SK_PNMI_HTX_OCTETLOW, SK_PNMI_HTX_PMACC}, +/* Bit3 */ { SK_PNMI_HTX_BROADCAST, SK_PNMI_HTX_MULTICAST}, +/* Bit4 */ { SK_PNMI_HTX_MULTICAST, SK_PNMI_HTX_OCTETLOW}, +/* Bit5 */ { SK_PNMI_HTX_UNICAST, SK_PNMI_HTX_OCTETHIGH}, +/* Bit6 */ { SK_PNMI_HTX_LONGFRAMES, SK_PNMI_HTX_64}, +/* Bit7 */ { SK_PNMI_HTX_BURST, SK_PNMI_HTX_127}, +/* Bit8 */ { SK_PNMI_HTX_PMACC, SK_PNMI_HTX_255}, +/* Bit9 */ { SK_PNMI_HTX_MACC, SK_PNMI_HTX_511}, +/* Bit10 */ { SK_PNMI_HTX_SINGLE_COL, SK_PNMI_HTX_1023}, +/* Bit11 */ { SK_PNMI_HTX_MULTI_COL, SK_PNMI_HTX_MAX}, +/* Bit12 */ { SK_PNMI_HTX_EXCESS_COL, SK_PNMI_HTX_LONGFRAMES}, +/* Bit13 */ { SK_PNMI_HTX_LATE_COL, SK_PNMI_HTX_RESERVED}, +/* Bit14 */ { SK_PNMI_HTX_DEFFERAL, SK_PNMI_HTX_COL}, +/* Bit15 */ { SK_PNMI_HTX_EXCESS_DEF, SK_PNMI_HTX_LATE_COL}, +/* Bit16 */ { SK_PNMI_HTX_UNDERRUN, SK_PNMI_HTX_EXCESS_COL}, +/* Bit17 */ { SK_PNMI_HTX_CARRIER, SK_PNMI_HTX_MULTI_COL}, +/* Bit18 */ { SK_PNMI_HTX_UTILUNDER, SK_PNMI_HTX_SINGLE_COL}, +/* Bit19 */ { SK_PNMI_HTX_UTILOVER, SK_PNMI_HTX_UNDERRUN}, +/* Bit20 */ { SK_PNMI_HTX_64, SK_PNMI_HTX_RESERVED}, +/* Bit21 */ { SK_PNMI_HTX_127, SK_PNMI_HTX_RESERVED}, +/* Bit22 */ { SK_PNMI_HTX_255, SK_PNMI_HTX_RESERVED}, +/* Bit23 */ { SK_PNMI_HTX_511, SK_PNMI_HTX_RESERVED}, +/* Bit24 */ { SK_PNMI_HTX_1023, SK_PNMI_HTX_RESERVED}, +/* Bit25 */ { SK_PNMI_HTX_MAX, SK_PNMI_HTX_RESERVED}, +/* Bit26 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit27 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit28 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit29 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit30 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit31 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED}, +/* Bit32 */ { SK_PNMI_HRX, SK_PNMI_HRX_UNICAST}, +/* Bit33 */ { SK_PNMI_HRX_OCTETHIGH, SK_PNMI_HRX_BROADCAST}, +/* Bit34 */ { SK_PNMI_HRX_OCTETLOW, SK_PNMI_HRX_PMACC}, +/* Bit35 */ { SK_PNMI_HRX_BROADCAST, SK_PNMI_HRX_MULTICAST}, +/* Bit36 */ { SK_PNMI_HRX_MULTICAST, SK_PNMI_HRX_FCS}, +/* Bit37 */ { SK_PNMI_HRX_UNICAST, SK_PNMI_HRX_RESERVED}, +/* Bit38 */ { SK_PNMI_HRX_PMACC, SK_PNMI_HRX_OCTETLOW}, +/* Bit39 */ { SK_PNMI_HRX_MACC, SK_PNMI_HRX_OCTETHIGH}, +/* Bit40 */ { SK_PNMI_HRX_PMACC_ERR, SK_PNMI_HRX_BADOCTETLOW}, +/* Bit41 */ { SK_PNMI_HRX_MACC_UNKWN, SK_PNMI_HRX_BADOCTETHIGH}, +/* Bit42 */ { SK_PNMI_HRX_BURST, SK_PNMI_HRX_UNDERSIZE}, +/* Bit43 */ { SK_PNMI_HRX_MISSED, SK_PNMI_HRX_RUNT}, +/* Bit44 */ { SK_PNMI_HRX_FRAMING, SK_PNMI_HRX_64}, +/* Bit45 */ { SK_PNMI_HRX_OVERFLOW, SK_PNMI_HRX_127}, +/* Bit46 */ { SK_PNMI_HRX_JABBER, SK_PNMI_HRX_255}, +/* Bit47 */ { SK_PNMI_HRX_CARRIER, SK_PNMI_HRX_511}, +/* Bit48 */ { SK_PNMI_HRX_IRLENGTH, SK_PNMI_HRX_1023}, +/* Bit49 */ { SK_PNMI_HRX_SYMBOL, SK_PNMI_HRX_MAX}, +/* Bit50 */ { SK_PNMI_HRX_SHORTS, SK_PNMI_HRX_LONGFRAMES}, +/* Bit51 */ { SK_PNMI_HRX_RUNT, SK_PNMI_HRX_TOO_LONG}, +/* Bit52 */ { SK_PNMI_HRX_TOO_LONG, SK_PNMI_HRX_JABBER}, +/* Bit53 */ { SK_PNMI_HRX_FCS, SK_PNMI_HRX_RESERVED}, +/* Bit54 */ { SK_PNMI_HRX_RESERVED, SK_PNMI_HRX_OVERFLOW}, +/* Bit55 
*/ { SK_PNMI_HRX_CEXT, SK_PNMI_HRX_RESERVED}, +/* Bit56 */ { SK_PNMI_HRX_UTILUNDER, SK_PNMI_HRX_RESERVED}, +/* Bit57 */ { SK_PNMI_HRX_UTILOVER, SK_PNMI_HRX_RESERVED}, +/* Bit58 */ { SK_PNMI_HRX_64, SK_PNMI_HRX_RESERVED}, +/* Bit59 */ { SK_PNMI_HRX_127, SK_PNMI_HRX_RESERVED}, +/* Bit60 */ { SK_PNMI_HRX_255, SK_PNMI_HRX_RESERVED}, +/* Bit61 */ { SK_PNMI_HRX_511, SK_PNMI_HRX_RESERVED}, +/* Bit62 */ { SK_PNMI_HRX_1023, SK_PNMI_HRX_RESERVED}, +/* Bit63 */ { SK_PNMI_HRX_MAX, SK_PNMI_HRX_RESERVED} }; /* * Table for hardware register saving on resets and port switches */ -static const SK_PNMI_STATADDR StatAddress[SK_PNMI_MAX_IDX] = { - /* 0 */ {TRUE, XM_TXF_OK}, - /* 1 */ {TRUE, 0}, - /* 2 */ {FALSE, 0}, - /* 3 */ {TRUE, XM_TXF_BC_OK}, - /* 4 */ {TRUE, XM_TXF_MC_OK}, - /* 5 */ {TRUE, XM_TXF_UC_OK}, - /* 6 */ {TRUE, XM_TXF_LONG}, - /* 7 */ {TRUE, XM_TXE_BURST}, - /* 8 */ {TRUE, XM_TXF_MPAUSE}, - /* 9 */ {TRUE, XM_TXF_MCTRL}, - /* 10 */ {TRUE, XM_TXF_SNG_COL}, - /* 11 */ {TRUE, XM_TXF_MUL_COL}, - /* 12 */ {TRUE, XM_TXF_ABO_COL}, - /* 13 */ {TRUE, XM_TXF_LAT_COL}, - /* 14 */ {TRUE, XM_TXF_DEF}, - /* 15 */ {TRUE, XM_TXF_EX_DEF}, - /* 16 */ {TRUE, XM_TXE_FIFO_UR}, - /* 17 */ {TRUE, XM_TXE_CS_ERR}, - /* 18 */ {FALSE, 0}, - /* 19 */ {FALSE, 0}, - /* 20 */ {TRUE, XM_TXF_64B}, - /* 21 */ {TRUE, XM_TXF_127B}, - /* 22 */ {TRUE, XM_TXF_255B}, - /* 23 */ {TRUE, XM_TXF_511B}, - /* 24 */ {TRUE, XM_TXF_1023B}, - /* 25 */ {TRUE, XM_TXF_MAX_SZ}, - /* 26 */ {FALSE, 0}, - /* 27 */ {FALSE, 0}, - /* 28 */ {FALSE, 0}, - /* 29 */ {FALSE, 0}, - /* 30 */ {FALSE, 0}, - /* 31 */ {FALSE, 0}, - /* 32 */ {TRUE, XM_RXF_OK}, - /* 33 */ {TRUE, 0}, - /* 34 */ {FALSE, 0}, - /* 35 */ {TRUE, XM_RXF_BC_OK}, - /* 36 */ {TRUE, XM_RXF_MC_OK}, - /* 37 */ {TRUE, XM_RXF_UC_OK}, - /* 38 */ {TRUE, XM_RXF_MPAUSE}, - /* 39 */ {TRUE, XM_RXF_MCTRL}, - /* 40 */ {TRUE, XM_RXF_INV_MP}, - /* 41 */ {TRUE, XM_RXF_INV_MOC}, - /* 42 */ {TRUE, XM_RXE_BURST}, - /* 43 */ {TRUE, XM_RXE_FMISS}, - /* 44 */ {TRUE, XM_RXF_FRA_ERR}, - /* 45 */ {TRUE, XM_RXE_FIFO_OV}, - /* 46 */ {TRUE, XM_RXF_JAB_PKT}, - /* 47 */ {TRUE, XM_RXE_CAR_ERR}, - /* 48 */ {TRUE, XM_RXF_LEN_ERR}, - /* 49 */ {TRUE, XM_RXE_SYM_ERR}, - /* 50 */ {TRUE, XM_RXE_SHT_ERR}, - /* 51 */ {TRUE, XM_RXE_RUNT}, - /* 52 */ {TRUE, XM_RXF_LNG_ERR}, - /* 53 */ {TRUE, XM_RXF_FCS_ERR}, - /* 54 */ {FALSE, 0}, - /* 55 */ {TRUE, XM_RXF_CEX_ERR}, - /* 56 */ {FALSE, 0}, - /* 57 */ {FALSE, 0}, - /* 58 */ {TRUE, XM_RXF_64B}, - /* 59 */ {TRUE, XM_RXF_127B}, - /* 60 */ {TRUE, XM_RXF_255B}, - /* 61 */ {TRUE, XM_RXF_511B}, - /* 62 */ {TRUE, XM_RXF_1023B}, - /* 63 */ {TRUE, XM_RXF_MAX_SZ}, - /* 64 */ {FALSE, 0}, - /* 65 */ {FALSE, 0}, - /* 66 */ {TRUE, 0} +PNMI_STATIC const SK_PNMI_STATADDR StatAddr[SK_PNMI_MAX_IDX][SK_PNMI_MAC_TYPES] = { + /* SK_PNMI_HTX */ + {{XM_TXF_OK, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_OCTETHIGH */ + {{XM_TXO_OK_HI, SK_TRUE}, {GM_TXO_OK_HI, SK_TRUE}}, + /* SK_PNMI_HTX_OCTETLOW */ + {{XM_TXO_OK_LO, SK_FALSE}, {GM_TXO_OK_LO, SK_FALSE}}, + /* SK_PNMI_HTX_BROADCAST */ + {{XM_TXF_BC_OK, SK_TRUE}, {GM_TXF_BC_OK, SK_TRUE}}, + /* SK_PNMI_HTX_MULTICAST */ + {{XM_TXF_MC_OK, SK_TRUE}, {GM_TXF_MC_OK, SK_TRUE}}, + /* SK_PNMI_HTX_UNICAST */ + {{XM_TXF_UC_OK, SK_TRUE}, {GM_TXF_UC_OK, SK_TRUE}}, + /* SK_PNMI_HTX_BURST */ + {{XM_TXE_BURST, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_PMACC */ + {{XM_TXF_MPAUSE, SK_TRUE}, {GM_TXF_MPAUSE, SK_TRUE}}, + /* SK_PNMI_HTX_MACC */ + {{XM_TXF_MCTRL, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_COL */ + {{0, SK_FALSE}, {GM_TXF_COL, SK_TRUE}}, + /* SK_PNMI_HTX_SINGLE_COL */ + 
{{XM_TXF_SNG_COL, SK_TRUE}, {GM_TXF_SNG_COL, SK_TRUE}}, + /* SK_PNMI_HTX_MULTI_COL */ + {{XM_TXF_MUL_COL, SK_TRUE}, {GM_TXF_MUL_COL, SK_TRUE}}, + /* SK_PNMI_HTX_EXCESS_COL */ + {{XM_TXF_ABO_COL, SK_TRUE}, {GM_TXF_ABO_COL, SK_TRUE}}, + /* SK_PNMI_HTX_LATE_COL */ + {{XM_TXF_LAT_COL, SK_TRUE}, {GM_TXF_LAT_COL, SK_TRUE}}, + /* SK_PNMI_HTX_DEFFERAL */ + {{XM_TXF_DEF, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_EXCESS_DEF */ + {{XM_TXF_EX_DEF, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_UNDERRUN */ + {{XM_TXE_FIFO_UR, SK_TRUE}, {GM_TXE_FIFO_UR, SK_TRUE}}, + /* SK_PNMI_HTX_CARRIER */ + {{XM_TXE_CS_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_UTILUNDER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_UTILOVER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_64 */ + {{XM_TXF_64B, SK_TRUE}, {GM_TXF_64B, SK_TRUE}}, + /* SK_PNMI_HTX_127 */ + {{XM_TXF_127B, SK_TRUE}, {GM_TXF_127B, SK_TRUE}}, + /* SK_PNMI_HTX_255 */ + {{XM_TXF_255B, SK_TRUE}, {GM_TXF_255B, SK_TRUE}}, + /* SK_PNMI_HTX_511 */ + {{XM_TXF_511B, SK_TRUE}, {GM_TXF_511B, SK_TRUE}}, + /* SK_PNMI_HTX_1023 */ + {{XM_TXF_1023B, SK_TRUE}, {GM_TXF_1023B, SK_TRUE}}, + /* SK_PNMI_HTX_MAX */ + {{XM_TXF_MAX_SZ, SK_TRUE}, {GM_TXF_1518B, SK_TRUE}}, + /* SK_PNMI_HTX_LONGFRAMES */ + {{XM_TXF_LONG, SK_TRUE}, {GM_TXF_MAX_SZ, SK_TRUE}}, + /* SK_PNMI_HTX_SYNC */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_SYNC_OCTET */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HTX_RESERVED */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX */ + {{XM_RXF_OK, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_OCTETHIGH */ + {{XM_RXO_OK_HI, SK_TRUE}, {GM_RXO_OK_HI, SK_TRUE}}, + /* SK_PNMI_HRX_OCTETLOW */ + {{XM_RXO_OK_LO, SK_FALSE}, {GM_RXO_OK_LO, SK_FALSE}}, + /* SK_PNMI_HRX_BADOCTETHIGH */ + {{0, SK_FALSE}, {GM_RXO_ERR_HI, SK_TRUE}}, + /* SK_PNMI_HRX_BADOCTETLOW */ + {{0, SK_FALSE}, {GM_RXO_ERR_LO, SK_TRUE}}, + /* SK_PNMI_HRX_BROADCAST */ + {{XM_RXF_BC_OK, SK_TRUE}, {GM_RXF_BC_OK, SK_TRUE}}, + /* SK_PNMI_HRX_MULTICAST */ + {{XM_RXF_MC_OK, SK_TRUE}, {GM_RXF_MC_OK, SK_TRUE}}, + /* SK_PNMI_HRX_UNICAST */ + {{XM_RXF_UC_OK, SK_TRUE}, {GM_RXF_UC_OK, SK_TRUE}}, + /* SK_PNMI_HRX_PMACC */ + {{XM_RXF_MPAUSE, SK_TRUE}, {GM_RXF_MPAUSE, SK_TRUE}}, + /* SK_PNMI_HRX_MACC */ + {{XM_RXF_MCTRL, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_PMACC_ERR */ + {{XM_RXF_INV_MP, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_MACC_UNKWN */ + {{XM_RXF_INV_MOC, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_BURST */ + {{XM_RXE_BURST, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_MISSED */ + {{XM_RXE_FMISS, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_FRAMING */ + {{XM_RXF_FRA_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_UNDERSIZE */ + {{0, SK_FALSE},{GM_RXF_SHT, SK_TRUE}}, + /* SK_PNMI_HRX_OVERFLOW */ + {{XM_RXE_FIFO_OV, SK_TRUE}, {GM_RXE_FIFO_OV, SK_TRUE}}, + /* SK_PNMI_HRX_JABBER */ + {{XM_RXF_JAB_PKT, SK_TRUE}, {GM_RXF_JAB_PKT, SK_TRUE}}, + /* SK_PNMI_HRX_CARRIER */ + {{XM_RXE_CAR_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_IRLENGTH */ + {{XM_RXF_LEN_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_SYMBOL */ + {{XM_RXE_SYM_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_SHORTS */ + {{XM_RXE_SHT_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_RUNT */ + {{XM_RXE_RUNT, SK_TRUE}, {GM_RXE_FRAG, SK_TRUE}}, + /* SK_PNMI_HRX_TOO_LONG */ + {{XM_RXF_LNG_ERR, SK_TRUE}, {GM_RXF_LNG_ERR, SK_TRUE}}, + /* SK_PNMI_HRX_FCS */ + {{XM_RXF_FCS_ERR, SK_TRUE}, {GM_RXF_FCS_ERR, SK_TRUE}}, + /* SK_PNMI_HRX_CEXT */ + {{XM_RXF_CEX_ERR, SK_TRUE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_UTILUNDER */ + {{0, SK_FALSE}, {0, 
SK_FALSE}}, + /* SK_PNMI_HRX_UTILOVER */ + {{0, SK_FALSE}, {0, SK_FALSE}}, + /* SK_PNMI_HRX_64 */ + {{XM_RXF_64B, SK_TRUE}, {GM_RXF_64B, SK_TRUE}}, + /* SK_PNMI_HRX_127 */ + {{XM_RXF_127B, SK_TRUE}, {GM_RXF_127B, SK_TRUE}}, + /* SK_PNMI_HRX_255 */ + {{XM_RXF_255B, SK_TRUE}, {GM_RXF_255B, SK_TRUE}}, + /* SK_PNMI_HRX_511 */ + {{XM_RXF_511B, SK_TRUE}, {GM_RXF_511B, SK_TRUE}}, + /* SK_PNMI_HRX_1023 */ + {{XM_RXF_1023B, SK_TRUE}, {GM_RXF_1023B, SK_TRUE}}, + /* SK_PNMI_HRX_MAX */ + {{XM_RXF_MAX_SZ, SK_TRUE}, {GM_RXF_1518B, SK_TRUE}}, + /* SK_PNMI_HRX_LONGFRAMES */ + {{0, SK_FALSE}, {GM_RXF_MAX_SZ, SK_TRUE}}, + /* SK_PNMI_HRX_RESERVED */ + {{0, SK_FALSE}, {0, SK_FALSE}} }; @@ -1457,7 +758,6 @@ * Returns: * Always 0 */ - int SkPnmiInit( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -1468,6 +768,8 @@ SK_U16 Val16; /* Multiple purpose 16 bit variable */ SK_U8 Val8; /* Mulitple purpose 8 bit variable */ SK_EVPARA EventParam; /* Event struct for timer event */ + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctBackupData; SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, @@ -1485,6 +787,33 @@ pAC->Pnmi.Port[PortIndex].ActiveFlag = SK_FALSE; pAC->Pnmi.DualNetActiveFlag = SK_FALSE; } + +#ifdef SK_PNMI_CHECK + if (SK_PNMI_MAX_IDX != SK_PNMI_CNT_NO) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR049, SK_PNMI_ERR049MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL, + ("CounterOffset struct size (%d) differs from" + "SK_PNMI_MAX_IDX (%d)\n", + SK_PNMI_CNT_NO, SK_PNMI_MAX_IDX)); + BRK; + } + + if (SK_PNMI_MAX_IDX != + (sizeof(StatAddr) / (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES))) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR050, SK_PNMI_ERR050MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL, + ("StatAddr table size (%d) differs from " + "SK_PNMI_MAX_IDX (%d)\n", + (sizeof(StatAddr) / + (sizeof(SK_PNMI_STATADDR) * SK_PNMI_MAC_TYPES)), + SK_PNMI_MAX_IDX)); + BRK; + } +#endif /* SK_PNMI_CHECK */ break; case SK_INIT_IO: @@ -1495,12 +824,17 @@ for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) { - Val16 = XM_SC_CLR_RXC | XM_SC_CLR_TXC; - XM_OUT16(IoC, PortIndex, XM_STAT_CMD, Val16); - /* Clear two times according to Errata #3 */ - XM_OUT16(IoC, PortIndex, XM_STAT_CMD, Val16); + pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PortIndex); } - + + /* Initialize DSP variables for Vct() to 0xff => Never written! */ + for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) { + pPrt = &pAC->GIni.GP[PortIndex]; + pPrt->PCableLen =0xff; + pVctBackupData = &pAC->Pnmi.VctBackup[PortIndex]; + pVctBackupData->PCableLen = 0xff; + } + /* * Get pci bus speed */ @@ -1526,6 +860,22 @@ } /* + * Get chipset + */ + switch (pAC->GIni.GIChipId) { + case CHIP_ID_GENESIS: + pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_XMAC; + break; + + case CHIP_ID_YUKON: + pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_YUKON; + break; + + default: + break; + } + + /* * Get PMD and DeviceType */ SK_IN8(IoC, B2_PMD_TYP, &Val8); @@ -1608,14 +958,14 @@ default: pAC->Pnmi.Connector = 1; break; - } + } break; - + case SK_INIT_RUN: /* * Start timer for RLMT change counter */ - SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer, 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER, EventParam); @@ -1647,7 +997,6 @@ * exist (e.g. port instance 3 on a two port * adapter. 
*/ - int SkPnmiGetVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -1690,7 +1039,6 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - int SkPnmiPreSetVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -1734,7 +1082,6 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - int SkPnmiSetVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -1771,7 +1118,6 @@ * the data. * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist */ - int SkPnmiGetStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -1861,8 +1207,7 @@ /* Retrieve values */ SK_MEMSET((char *)pBuf, 0, SK_PNMI_STRUCT_SIZE); - for (TableIndex = 0; TableIndex < sizeof(IdTable)/sizeof(IdTable[0]); - TableIndex ++) { + for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) { InstanceNo = IdTable[TableIndex].InstanceNo; for (InstanceCnt = 1; InstanceCnt <= InstanceNo; @@ -1951,7 +1296,6 @@ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid * value range. */ - int SkPnmiPreSetStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -1990,7 +1334,6 @@ * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid * value range. */ - int SkPnmiSetStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -2056,7 +1399,6 @@ * Returns: * Always 0 */ - int SkPnmiEvent( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ @@ -2064,14 +1406,15 @@ SK_EVPARA Param) /* Event dependent parameter */ { unsigned int PhysPortIndex; - unsigned int MaxNetNumber; - int CounterIndex; - int Ret; + unsigned int MaxNetNumber; + int CounterIndex; + int Ret; SK_U16 MacStatus; SK_U64 OverflowStatus; SK_U64 Mask; - SK_U32 MacCntEvent; + int MacType; SK_U64 Value; + SK_U32 Val32; SK_U16 Register; SK_EVPARA EventParam; SK_U64 NewestValue; @@ -2079,6 +1422,11 @@ SK_U64 Delta; SK_PNMI_ESTIMATE *pEst; SK_U32 NetIndex; + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctBackupData; + SK_U32 RetCode; + int i; + SK_U32 CableLength; #ifdef DEBUG @@ -2086,11 +1434,13 @@ SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, ("PNMI: SkPnmiEvent: Called, Event=0x%x, Param=0x%x\n", - (unsigned long)Event, (unsigned long)Param.Para64)); + (unsigned int)Event, (unsigned int)Param.Para64)); } #endif SK_PNMI_CHECKFLAGS("SkPnmiEvent: On call"); + MacType = pAC->GIni.GIMacType; + switch (Event) { case SK_PNMI_EVT_SIRQ_OVERFLOW: @@ -2100,7 +1450,8 @@ if (PhysPortIndex >= SK_MAX_MACS) { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, - ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SIRQ_OVERFLOW parameter wrong, PhysPortIndex=0x%x\n", + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SIRQ_OVERFLOW parameter" + " wrong, PhysPortIndex=0x%x\n", PhysPortIndex)); return (0); } @@ -2108,28 +1459,14 @@ OverflowStatus = 0; /* - * Check which source caused an overflow interrupt. The - * interrupt source is a self-clearing register. We only - * need to check the interrupt source once. Another check - * will be done by the SIRQ module to be sure that no - * interrupt get lost during process time. + * Check which source caused an overflow interrupt. 
*/ - if ((MacStatus & XM_IS_RXC_OV) == XM_IS_RXC_OV) { - - XM_IN32(IoC, PhysPortIndex, XM_RX_CNT_EV, - &MacCntEvent); - OverflowStatus |= (SK_U64)MacCntEvent << 32; - } - if ((MacStatus & XM_IS_TXC_OV) == XM_IS_TXC_OV) { - - XM_IN32(IoC, PhysPortIndex, XM_TX_CNT_EV, - &MacCntEvent); - OverflowStatus |= (SK_U64)MacCntEvent; - } - if (OverflowStatus == 0) { + if ((pAC->GIni.GIFunc.pFnMacOverflow( + pAC, IoC, PhysPortIndex, MacStatus, &OverflowStatus) != 0) || + (OverflowStatus == 0)) { SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return"); - return (0); + return (0); } /* @@ -2145,7 +1482,7 @@ continue; } - switch (CounterIndex) { + switch (StatOvrflwBit[CounterIndex][MacType]) { case SK_PNMI_HTX_UTILUNDER: case SK_PNMI_HTX_UTILOVER: @@ -2167,23 +1504,24 @@ case SK_PNMI_HTX_OCTETHIGH: case SK_PNMI_HTX_OCTETLOW: - case SK_PNMI_HTX_RESERVED26: - case SK_PNMI_HTX_RESERVED27: - case SK_PNMI_HTX_RESERVED28: - case SK_PNMI_HTX_RESERVED29: - case SK_PNMI_HTX_RESERVED30: - case SK_PNMI_HTX_RESERVED31: + case SK_PNMI_HTX_RESERVED: case SK_PNMI_HRX_OCTETHIGH: case SK_PNMI_HRX_OCTETLOW: case SK_PNMI_HRX_IRLENGTH: - case SK_PNMI_HRX_RESERVED22: + case SK_PNMI_HRX_RESERVED: /* * the following counters aren't be handled (id > 63) */ case SK_PNMI_HTX_SYNC: case SK_PNMI_HTX_SYNC_OCTET: + break; + case SK_PNMI_HRX_LONGFRAMES: + if (MacType == SK_MAC_GMAC) { + pAC->Pnmi.Port[PhysPortIndex]. + CounterHigh[CounterIndex] ++; + } break; default: @@ -2333,7 +1671,7 @@ (void)SK_DRIVER_SENDEVENT(pAC, IoC); } - SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam)); + SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam)); SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer, 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER, EventParam); @@ -2396,10 +1734,10 @@ */ pAC->Pnmi.MacUpdatedFlag ++; - for (CounterIndex = 0; CounterIndex < SK_PNMI_SCNT_NOT; + for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX; CounterIndex ++) { - if (!StatAddress[CounterIndex].GetOffset) { + if (!StatAddr[CounterIndex][MacType].GetOffset) { continue; } @@ -2415,12 +1753,13 @@ break; case SK_PNMI_EVT_RLMT_PORT_UP: + PhysPortIndex = (unsigned int)Param.Para32[0]; #ifdef DEBUG - if ((unsigned int)Param.Para32[0] >= SK_MAX_MACS) { + if (PhysPortIndex >= SK_MAX_MACS) { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, - ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_UP parameter wrong, PhysPortIndex=%d\n", - (unsigned int)Param.Para32[0])); + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_UP parameter" + " wrong, PhysPortIndex=%d\n", PhysPortIndex)); return (0); } @@ -2429,18 +1768,35 @@ * Store a trap message in the trap buffer and generate an event for * user space applications with the SK_DRIVER_SENDEVENT macro. */ - QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_UP, - (unsigned int)Param.Para32[0]); + QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_UP, PhysPortIndex); (void)SK_DRIVER_SENDEVENT(pAC, IoC); + + /* Bugfix for XMAC errata (#10620)*/ + if (pAC->GIni.GIMacType == SK_MAC_XMAC){ + + /* Add incremental difference to offset (#10620)*/ + (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex, + XM_RXE_SHT_ERR, &Val32); + + Value = (((SK_U64)pAC->Pnmi.Port[PhysPortIndex]. + CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32); + pAC->Pnmi.Port[PhysPortIndex].CounterOffset[SK_PNMI_HRX_SHORTS] += + Value - pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark; + } + + /* Tell VctStatus() that a link was up meanwhile. 
*/ + pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_LINK; break; - case SK_PNMI_EVT_RLMT_PORT_DOWN: + case SK_PNMI_EVT_RLMT_PORT_DOWN: + PhysPortIndex = (unsigned int)Param.Para32[0]; + #ifdef DEBUG - if ((unsigned int)Param.Para32[0] >= SK_MAX_MACS) { + if (PhysPortIndex >= SK_MAX_MACS) { SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL, - ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_DOWN parameter wrong, PhysPortIndex=%d\n", - (unsigned int)Param.Para32[0])); + ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_DOWN parameter" + " wrong, PhysPortIndex=%d\n", PhysPortIndex)); return (0); } @@ -2449,14 +1805,24 @@ * Store a trap message in the trap buffer and generate an event for * user space applications with the SK_DRIVER_SENDEVENT macro. */ - QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_DOWN, - (unsigned int)Param.Para32[0]); + QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_DOWN, PhysPortIndex); (void)SK_DRIVER_SENDEVENT(pAC, IoC); + + /* Bugfix #10620 - get zero level for incremental difference */ + if ((pAC->GIni.GIMacType == SK_MAC_XMAC)) { + + (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex, + XM_RXE_SHT_ERR, &Val32); + pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark = + (((SK_U64)pAC->Pnmi.Port[PhysPortIndex]. + CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32); + } break; case SK_PNMI_EVT_RLMT_ACTIVE_DOWN: PhysPortIndex = (unsigned int)Param.Para32[0]; NetIndex = (SK_U32)Param.Para32[1]; + #ifdef DEBUG if (PhysPortIndex >= SK_MAX_MACS) { @@ -2512,7 +1878,7 @@ for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX; CounterIndex ++) { - if (!StatAddress[CounterIndex].GetOffset) { + if (!StatAddr[CounterIndex][MacType].GetOffset) { continue; } @@ -2533,6 +1899,7 @@ case SK_PNMI_EVT_RLMT_ACTIVE_UP: PhysPortIndex = (unsigned int)Param.Para32[0]; NetIndex = (SK_U32)Param.Para32[1]; + #ifdef DEBUG if (PhysPortIndex >= SK_MAX_MACS) { @@ -2599,7 +1966,7 @@ for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX; CounterIndex ++) { - if (!StatAddress[CounterIndex].GetOffset) { + if (!StatAddr[CounterIndex][MacType].GetOffset) { continue; } @@ -2644,7 +2011,7 @@ return (SK_PNMI_ERR_UNKNOWN_NET); } - if((unsigned int)Param.Para32[0] == 1){ /* single net mode */ + if ((unsigned int)Param.Para32[0] == 1) { /* single net mode */ pAC->Pnmi.DualNetActiveFlag = SK_FALSE; } else { /* dual net mode */ @@ -2652,6 +2019,49 @@ } break; + case SK_PNMI_EVT_VCT_RESET: + PhysPortIndex = Param.Para32[0]; + pPrt = &pAC->GIni.GP[PhysPortIndex]; + pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; + + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) { + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 2) { + /* + * VCT test is still running. + * Start VCT timer counter again. + */ + SK_MEMSET((char *) &Param, 0, sizeof(Param)); + Param.Para32[0] = PhysPortIndex; + Param.Para32[1] = -1; + SkTimerStart(pAC, IoC, &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer, + 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Param); + break; + } + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] |= + (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE); + + /* Copy results for later use to PNMI struct. 
*/ + for (i = 0; i < 4; i++) { + if (pPrt->PMdiPairLen[i] > 35) { + CableLength = 1000 * (((175 * pPrt->PMdiPairLen[i]) / 210) - 28); + } + else { + CableLength = 0; + } + pVctBackupData->PMdiPairLen[i] = CableLength; + pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i]; + } + + Param.Para32[0] = PhysPortIndex; + Param.Para32[1] = -1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Param); + SkEventDispatcher(pAC, IoC); + } + + break; + default: break; } @@ -2682,8 +2092,7 @@ * calling functions. * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist */ - -static int PnmiVar( +PNMI_STATIC int PnmiVar( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -2738,8 +2147,7 @@ * SK_PNMI_ERR_XXX. The codes are described in the calling functions. * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist */ - -static int PnmiStruct( +PNMI_STATIC int PnmiStruct( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Set action to be performed */ @@ -2802,10 +2210,10 @@ pAC->Pnmi.SirqUpdatedFlag ++; /* Preset/Set values */ - for (TableIndex = 0; TableIndex < sizeof(IdTable)/sizeof(IdTable[0]); - TableIndex ++) { + for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) { - if (IdTable[TableIndex].Access != SK_PNMI_RW) { + if ((IdTable[TableIndex].Access != SK_PNMI_RW) && + (IdTable[TableIndex].Access != SK_PNMI_WO)) { continue; } @@ -2906,14 +2314,12 @@ * Returns: * The table index or -1 if not found. */ - -static int LookupId( +PNMI_STATIC int LookupId( SK_U32 Id) /* Object identifier to be searched */ { int i; - int Len = sizeof(IdTable)/sizeof(IdTable[0]); - for (i=0; iGIni.GIMacsFound; LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ LogPortMax--; } @@ -3384,17 +2786,43 @@ case OID_SKGE_STAT_RX_UTIL: return (SK_PNMI_ERR_GENERAL); */ - /* - * Frames longer than IEEE 802.3 frame max size are counted - * by XMAC in frame_too_long counter even reception of long - * frames was enabled and the frame was correct. - * So correct the value by subtracting RxLongFrame counter. - */ - case OID_SKGE_STAT_RX_TOO_LONG: - StatVal = GetStatVal(pAC, IoC, LogPortIndex, - IdTable[TableIndex].Param, NetIndex) - - GetStatVal(pAC, IoC, LogPortIndex, - SK_PNMI_HRX_LONGFRAMES, NetIndex); + case OID_SKGE_STAT_RX: + case OID_SKGE_STAT_TX: + switch (pAC->GIni.GIMacType) { + case SK_MAC_XMAC: + StatVal = GetStatVal(pAC, IoC, LogPortIndex, + IdTable[TableIndex].Param, NetIndex); + break; + + case SK_MAC_GMAC: + if (Id == OID_SKGE_STAT_TX) { + + StatVal = + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_BROADCAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_MULTICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HTX_UNICAST, NetIndex); + } + else { + StatVal = + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_BROADCAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_MULTICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_UNICAST, NetIndex) + + GetStatVal(pAC, IoC, LogPortIndex, + SK_PNMI_HRX_UNDERSIZE, NetIndex); + } + break; + + default: + StatVal = 0; + break; + } + SK_PNMI_STORE_U64(pBuf + Offset, StatVal); break; @@ -3438,8 +2866,7 @@ * exist (e.g. port instance 3 on a two port * adapter. 
*/ - -static int Addr( +PNMI_STATIC int Addr( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -3458,8 +2885,6 @@ unsigned int Limit; unsigned int Offset = 0; - - /* * Calculate instance if wished. MAC index 0 is the virtual * MAC. @@ -3467,7 +2892,7 @@ PhysPortMax = pAC->GIni.GIMacsFound; LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ LogPortMax--; } @@ -3647,8 +3072,7 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - -static int CsumStat( +PNMI_STATIC int CsumStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -3766,8 +3190,7 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - -static int SensorStat( +PNMI_STATIC int SensorStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -3976,8 +3399,8 @@ break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR013, - SK_PNMI_ERR013MSG); + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("SensorStat: Unknown OID should be handled before")); return (SK_PNMI_ERR_GENERAL); } @@ -4014,8 +3437,7 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - -static int Vpd( +PNMI_STATIC int Vpd( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -4490,8 +3912,7 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - -static int General( +PNMI_STATIC int General( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -4515,7 +3936,7 @@ SK_U64 Val64TxHwErrs = 0; SK_BOOL Is64BitReq = SK_FALSE; char Buf[256]; - + int MacType; /* * Check instance. 
We only handle single instance variables @@ -4534,7 +3955,9 @@ *pLen = 0; return (SK_PNMI_ERR_READ_ONLY); } - + + MacType = pAC->GIni.GIMacType; + /* * Check length for the various supported OIDs */ @@ -4669,8 +4092,7 @@ GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SYMBOL, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SHORTS, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_RUNT, NetIndex) + - GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_TOO_LONG, NetIndex)- - GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_LONGFRAMES, NetIndex)+ + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_TOO_LONG, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FCS, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CEXT, NetIndex); break; @@ -4682,8 +4104,7 @@ GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_EXCESS_COL, NetIndex) + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_LATE_COL, NetIndex)+ GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_UNDERRUN, NetIndex)+ - GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_CARRIER, NetIndex)+ - GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_EXCESS_COL, NetIndex); + GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_CARRIER, NetIndex); break; } } @@ -4694,7 +4115,7 @@ switch (Id) { case OID_SKGE_SUPPORTED_LIST: - Len = sizeof(IdTable)/sizeof(IdTable[0]) * sizeof(SK_U32); + Len = ID_TABLE_SIZE * sizeof(SK_U32); if (*pLen < Len) { *pLen = Len; @@ -4833,7 +4254,7 @@ break; case OID_SKGE_CHIPSET: - Val16 = SK_PNMI_CHIPSET; + Val16 = pAC->Pnmi.Chipset; SK_PNMI_STORE_U16(pBuf, Val16); *pLen = sizeof(SK_U16); break; @@ -4895,141 +4316,281 @@ break; case OID_SKGE_TX_SW_QUEUE_LEN: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueLen; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueLen; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxSwQueueLen + + pAC->Pnmi.BufPort[1].TxSwQueueLen; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].TxSwQueueLen + - pAC->Pnmi.Port[1].TxSwQueueLen; - } + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueLen; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxSwQueueLen + + pAC->Pnmi.Port[1].TxSwQueueLen; + } + } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_TX_SW_QUEUE_MAX: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueMax; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueMax; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxSwQueueMax + + pAC->Pnmi.BufPort[1].TxSwQueueMax; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].TxSwQueueMax + - pAC->Pnmi.Port[1].TxSwQueueMax; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueMax; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxSwQueueMax + + pAC->Pnmi.Port[1].TxSwQueueMax; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_TX_RETRY: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].TxRetryCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual 
net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxRetryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxRetryCts + + pAC->Pnmi.BufPort[1].TxRetryCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].TxRetryCts + - pAC->Pnmi.Port[1].TxRetryCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxRetryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxRetryCts + + pAC->Pnmi.Port[1].TxRetryCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_RX_INTR_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].RxIntrCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxIntrCts + + pAC->Pnmi.BufPort[1].RxIntrCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].RxIntrCts + - pAC->Pnmi.Port[1].RxIntrCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxIntrCts + + pAC->Pnmi.Port[1].RxIntrCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_TX_INTR_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].TxIntrCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxIntrCts + + pAC->Pnmi.BufPort[1].TxIntrCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].TxIntrCts + - pAC->Pnmi.Port[1].TxIntrCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxIntrCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxIntrCts + + pAC->Pnmi.Port[1].TxIntrCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_RX_NO_BUF_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxNoBufCts + + pAC->Pnmi.BufPort[1].RxNoBufCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].RxNoBufCts + - pAC->Pnmi.Port[1].RxNoBufCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxNoBufCts + + pAC->Pnmi.Port[1].RxNoBufCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_TX_NO_BUF_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].TxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode 
*/ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxNoBufCts + + pAC->Pnmi.BufPort[1].TxNoBufCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].TxNoBufCts + - pAC->Pnmi.Port[1].TxNoBufCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxNoBufCts + + pAC->Pnmi.Port[1].TxNoBufCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_TX_USED_DESCR_NO: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].TxUsedDescrNo; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].TxUsedDescrNo; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].TxUsedDescrNo + + pAC->Pnmi.BufPort[1].TxUsedDescrNo; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].TxUsedDescrNo + - pAC->Pnmi.Port[1].TxUsedDescrNo; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].TxUsedDescrNo; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].TxUsedDescrNo + + pAC->Pnmi.Port[1].TxUsedDescrNo; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_RX_DELIVERED_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].RxDeliveredCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxDeliveredCts + + pAC->Pnmi.BufPort[1].RxDeliveredCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].RxDeliveredCts + - pAC->Pnmi.Port[1].RxDeliveredCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxDeliveredCts + + pAC->Pnmi.Port[1].RxDeliveredCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_RX_OCTETS_DELIV_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxOctetsDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].RxOctetsDeliveredCts + + pAC->Pnmi.BufPort[1].RxOctetsDeliveredCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].RxOctetsDeliveredCts + - pAC->Pnmi.Port[1].RxOctetsDeliveredCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].RxOctetsDeliveredCts + + pAC->Pnmi.Port[1].RxOctetsDeliveredCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); @@ -5046,44 +4607,88 @@ break; case OID_SKGE_IN_ERRORS_CTS: - /* Dual net mode */ - 
if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64RxHwErrs + + pAC->Pnmi.BufPort[0].RxNoBufCts + + pAC->Pnmi.BufPort[1].RxNoBufCts; + } } - /* Single net mode */ else { - Val64 = Val64RxHwErrs + - pAC->Pnmi.Port[0].RxNoBufCts + - pAC->Pnmi.Port[1].RxNoBufCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64RxHwErrs + + pAC->Pnmi.Port[0].RxNoBufCts + + pAC->Pnmi.Port[1].RxNoBufCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_OUT_ERROR_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64TxHwErrs + + pAC->Pnmi.BufPort[0].TxNoBufCts + + pAC->Pnmi.BufPort[1].TxNoBufCts; + } } - /* Single net mode */ else { - Val64 = Val64TxHwErrs + - pAC->Pnmi.Port[0].TxNoBufCts + - pAC->Pnmi.Port[1].TxNoBufCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts; + } + /* Single net mode */ + else { + Val64 = Val64TxHwErrs + + pAC->Pnmi.Port[0].TxNoBufCts + + pAC->Pnmi.Port[1].TxNoBufCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); break; case OID_SKGE_ERR_RECOVERY_CTS: - /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ - Val64 = pAC->Pnmi.Port[NetIndex].ErrRecoveryCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.BufPort[NetIndex].ErrRecoveryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.BufPort[0].ErrRecoveryCts + + pAC->Pnmi.BufPort[1].ErrRecoveryCts; + } } - /* Single net mode */ else { - Val64 = pAC->Pnmi.Port[0].ErrRecoveryCts + - pAC->Pnmi.Port[1].ErrRecoveryCts; + /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + Val64 = pAC->Pnmi.Port[NetIndex].ErrRecoveryCts; + } + /* Single net mode */ + else { + Val64 = pAC->Pnmi.Port[0].ErrRecoveryCts + + pAC->Pnmi.Port[1].ErrRecoveryCts; + } } SK_PNMI_STORE_U64(pBuf, Val64); *pLen = sizeof(SK_U64); @@ -5103,7 +4708,13 @@ break; case OID_GEN_RCV_ERROR: - Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + else { + Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } /* * by default 32bit values are evaluated @@ -5120,7 +4731,13 @@ break; case OID_GEN_XMIT_ERROR: - Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + Val64 = Val64TxHwErrs + 
pAC->Pnmi.BufPort[NetIndex].TxNoBufCts; + } + else { + Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts; + } /* * by default 32bit values are evaluated @@ -5137,7 +4754,13 @@ break; case OID_GEN_RCV_NO_BUFFER: - Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts; + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (MacType == SK_MAC_XMAC) { + Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts; + } + else { + Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts; + } /* * by default 32bit values are evaluated @@ -5200,8 +4823,7 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - -static int Rlmt( +PNMI_STATIC int Rlmt( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -5359,8 +4981,8 @@ break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR036, - SK_PNMI_ERR036MSG); + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("Rlmt: Unknown OID should be handled before")); pAC->Pnmi.RlmtUpdatedFlag --; *pLen = 0; @@ -5506,8 +5128,7 @@ * exist (e.g. port instance 3 on a two port * adapter. */ - -static int RlmtStat( +PNMI_STATIC int RlmtStat( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -5543,7 +5164,7 @@ PhysPortIndex = Instance - 1; /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { PhysPortIndex = NetIndex; } @@ -5556,7 +5177,7 @@ Limit = PhysPortMax; /* Dual net mode */ - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { PhysPortIndex = NetIndex; Limit = PhysPortIndex + 1; } @@ -5674,8 +5295,8 @@ break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, - SK_PNMI_ERR040MSG); + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("RlmtStat: Unknown OID should be errored before")); pAC->Pnmi.RlmtUpdatedFlag --; *pLen = 0; @@ -5709,8 +5330,7 @@ * exist (e.g. port instance 3 on a two port * adapter. 
*/ - -static int MacPrivateConf( +PNMI_STATIC int MacPrivateConf( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -5740,7 +5360,7 @@ PhysPortMax = pAC->GIni.GIMacsFound; LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ LogPortMax--; } @@ -5783,6 +5403,9 @@ case OID_SKGE_PHY_OPERATION_CAP: case OID_SKGE_PHY_OPERATION_MODE: case OID_SKGE_PHY_OPERATION_STATUS: + case OID_SKGE_SPEED_CAP: + case OID_SKGE_SPEED_MODE: + case OID_SKGE_SPEED_STATUS: if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) { *pLen = (Limit - LogPortIndex) * @@ -5836,188 +5459,327 @@ break; case OID_SKGE_LINK_CAP: - if (LogPortIndex == 0) { + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PLinkCap; + } + Offset += sizeof(char); } - else { - /* Get value for physical ports */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); - - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkCap; + else { /* DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkCap; + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_LINK_MODE: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + Offset); - } - else { - /* Get value for physical ports */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PLinkModeConf; + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PLinkModeConf; + } + Offset += sizeof(char); + } + else { /* DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkModeConf; + Offset += sizeof(char); } - - Offset += sizeof(char); break; case OID_SKGE_LINK_MODE_STATUS: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = + CalculateLinkModeStatus(pAC, + IoC, PhysPortIndex); + } + Offset += sizeof(char); } - else { - /* Get value for physical port */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); - - *(pBuf + Offset) = - CalculateLinkModeStatus(pAC, - IoC, PhysPortIndex); + else { /* DualNetMode */ + *(pBuf + Offset) = CalculateLinkModeStatus(pAC, IoC, NetIndex); + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_LINK_STATUS: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual 
port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = + CalculateLinkStatus(pAC, + IoC, PhysPortIndex); + } + Offset += sizeof(char); } - else { - /* Get value for physical ports */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); + else { /* DualNetMode */ - *(pBuf + Offset) = - CalculateLinkStatus(pAC, - IoC, PhysPortIndex); + *(pBuf + Offset) = CalculateLinkStatus(pAC, IoC, NetIndex); + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_FLOWCTRL_CAP: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PFlowCtrlCap; + } + Offset += sizeof(char); } - else { - /* Get value for physical ports */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); - - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PFlowCtrlCap; + else { /* DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PFlowCtrlCap; + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_FLOWCTRL_MODE: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PFlowCtrlMode; + } + Offset += sizeof(char); } - else { - /* Get value for physical port */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); + else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PFlowCtrlMode; + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PFlowCtrlMode; + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_FLOWCTRL_STATUS: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PFlowCtrlStatus; + } + Offset += sizeof(char); } - else { - /* Get value for physical port */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); + else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PFlowCtrlStatus; + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PFlowCtrlStatus; + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_PHY_OPERATION_CAP: - if (LogPortIndex == 0) { - - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, 
LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PMSCap; + } + Offset += sizeof(char); } - else { - /* Get value for physical ports */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); - - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PMSCap; + else { /* DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PMSCap; + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_PHY_OPERATION_MODE: - if (LogPortIndex == 0) { + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); - } - else { - /* Get value for physical port */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PMSMode; + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PMSMode; + } + Offset += sizeof(char); + } + else { /* DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PMSMode; + Offset += sizeof(char); } - Offset += sizeof(char); break; case OID_SKGE_PHY_OPERATION_STATUS: - if (LogPortIndex == 0) { + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { - /* Get value for virtual port */ - VirtualConf(pAC, IoC, Id, pBuf + - Offset); + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PMSStatus; + } + Offset += sizeof(char); } else { - /* Get value for physical port */ - PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( - pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PMSStatus; + Offset += sizeof(char); + } + break; + + case OID_SKGE_SPEED_CAP: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + + Offset); + } + else { + /* Get value for physical ports */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PLinkSpeedCap; + } + Offset += sizeof(char); + } + else { /* DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkSpeedCap; + Offset += sizeof(char); + } + break; + + case OID_SKGE_SPEED_MODE: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PLinkSpeed; + } + Offset += sizeof(char); + } + else { /* DualNetMode */ - *(pBuf + Offset) = pAC->GIni.GP[ - PhysPortIndex].PMSStatus; + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkSpeed; + Offset += sizeof(char); } - Offset += sizeof(char); break; + case OID_SKGE_SPEED_STATUS: + if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */ + if (LogPortIndex == 0) { + + /* Get value for virtual port */ + VirtualConf(pAC, IoC, Id, pBuf + Offset); + } + else { + /* Get value for physical port */ + PhysPortIndex = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + + *(pBuf + Offset) = pAC->GIni.GP[ + PhysPortIndex].PLinkSpeedUsed; + } + Offset += sizeof(char); + } + else { /* 
DualNetMode */ + + *(pBuf + Offset) = pAC->GIni.GP[NetIndex].PLinkSpeedUsed; + Offset += sizeof(char); + } + break; + case OID_SKGE_MTU: Val32 = SK_DRIVER_GET_MTU(pAC, IoC, NetIndex); SK_PNMI_STORE_U32(pBuf + Offset, Val32); @@ -6025,8 +5787,8 @@ break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR042, - SK_PNMI_ERR042MSG); + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("MacPrivateConf: Unknown OID should be handled before")); pAC->Pnmi.SirqUpdatedFlag --; return (SK_PNMI_ERR_GENERAL); @@ -6047,6 +5809,7 @@ case OID_SKGE_LINK_MODE: case OID_SKGE_FLOWCTRL_MODE: case OID_SKGE_PHY_OPERATION_MODE: + case OID_SKGE_SPEED_MODE: if (*pLen < Limit - LogPortIndex) { *pLen = Limit - LogPortIndex; @@ -6284,8 +6047,8 @@ EventParam) > 0) { SK_ERR_LOG(pAC, SK_ERRCL_SW, - SK_PNMI_ERR052, - SK_PNMI_ERR052MSG); + SK_PNMI_ERR042, + SK_PNMI_ERR042MSG); *pLen = 0; return (SK_PNMI_ERR_GENERAL); @@ -6304,8 +6067,8 @@ SK_HWEV_SET_ROLE, EventParam) > 0) { SK_ERR_LOG(pAC, SK_ERRCL_SW, - SK_PNMI_ERR052, - SK_PNMI_ERR052MSG); + SK_PNMI_ERR042, + SK_PNMI_ERR042MSG); *pLen = 0; return (SK_PNMI_ERR_GENERAL); @@ -6315,6 +6078,82 @@ Offset += sizeof(char); break; + case OID_SKGE_SPEED_MODE: + /* Check the value range */ + Val8 = *(pBuf + Offset); + if (Val8 == 0) { + + Offset += sizeof(char); + break; + } + if (Val8 < (SK_LSPEED_AUTO) || + (LogPortIndex != 0 && Val8 > (SK_LSPEED_1000MBPS)) || + (LogPortIndex == 0 && Val8 > (SK_LSPEED_INDETERMINATED))) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + + /* The preset ends here */ + if (Action == SK_PNMI_PRESET) { + + return (SK_PNMI_ERR_OK); + } + + if (LogPortIndex == 0) { + + /* + * The virtual port consists of all currently + * active ports. Find them and send an event + * with the new flow control mode to SIRQ. + */ + for (PhysPortIndex = 0; + PhysPortIndex < PhysPortMax; + PhysPortIndex ++) { + + if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) { + + continue; + } + + EventParam.Para32[0] = PhysPortIndex; + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_SPEED, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR045, + SK_PNMI_ERR045MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + } + else { + /* + * Send an event with the new flow control + * mode to the SIRQ module. + */ + EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS( + pAC, LogPortIndex); + EventParam.Para32[1] = (SK_U32)Val8; + if (SkGeSirqEvent(pAC, IoC, + SK_HWEV_SET_SPEED, + EventParam) > 0) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, + SK_PNMI_ERR045, + SK_PNMI_ERR045MSG); + + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } + Offset += sizeof(char); + break; + case OID_SKGE_MTU : /* Check the value range */ Val32 = *(SK_U32*)(pBuf + Offset); @@ -6341,8 +6180,8 @@ break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR045, - SK_PNMI_ERR045MSG); + SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR, + ("MacPrivateConf: Unknown OID should be handled before set")); *pLen = 0; return (SK_PNMI_ERR_GENERAL); @@ -6373,8 +6212,7 @@ * exist (e.g. port instance 3 on a two port * adapter. 
*/ - -static int Monitor( +PNMI_STATIC int Monitor( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ int Action, /* Get/PreSet/Set action */ @@ -6489,8 +6327,7 @@ * Returns: * Nothing */ - -static void VirtualConf( +PNMI_STATIC void VirtualConf( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 Id, /* Object ID that is to be processed */ @@ -6534,8 +6371,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PLinkModeConf; + *pBuf = pAC->GIni.GP[PhysPortIndex].PLinkModeConf; continue; } @@ -6553,8 +6389,7 @@ case OID_SKGE_LINK_MODE_STATUS: /* Get the link mode of the physical port */ - Val8 = CalculateLinkModeStatus(pAC, IoC, - PhysPortIndex); + Val8 = CalculateLinkModeStatus(pAC, IoC, PhysPortIndex); /* Check if it is the first active port */ if (*pBuf == 0) { @@ -6602,8 +6437,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PFlowCtrlCap; + *pBuf = pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap; continue; } @@ -6618,8 +6452,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PFlowCtrlMode; + *pBuf = pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode; continue; } @@ -6628,8 +6461,7 @@ * control mode than the first one, we return a value * that indicates that the mode is indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex]. - PFlowCtrlMode) { + if (*pBuf != pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode) { *pBuf = SK_FLOW_MODE_INDETERMINATED; } @@ -6639,8 +6471,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PFlowCtrlStatus; + *pBuf = pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus; continue; } @@ -6650,18 +6481,17 @@ * value that indicates that the status is * indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex]. - PFlowCtrlStatus) { + if (*pBuf != pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus) { *pBuf = SK_FLOW_STAT_INDETERMINATED; } break; + case OID_SKGE_PHY_OPERATION_CAP: /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PMSCap; + *pBuf = pAC->GIni.GP[PhysPortIndex].PMSCap; continue; } @@ -6676,8 +6506,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PMSMode; + *pBuf = pAC->GIni.GP[PhysPortIndex].PMSMode; continue; } @@ -6686,8 +6515,7 @@ * slave mode than the first one, we return a value * that indicates that the mode is indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex]. - PMSMode) { + if (*pBuf != pAC->GIni.GP[PhysPortIndex].PMSMode) { *pBuf = SK_MS_MODE_INDETERMINATED; } @@ -6697,8 +6525,7 @@ /* Check if it is the first active port */ if (*pBuf == 0) { - *pBuf = pAC->GIni.GP[PhysPortIndex]. - PMSStatus; + *pBuf = pAC->GIni.GP[PhysPortIndex].PMSStatus; continue; } @@ -6708,12 +6535,50 @@ * value that indicates that the status is * indeterminated. */ - if (*pBuf != pAC->GIni.GP[PhysPortIndex]. - PMSStatus) { + if (*pBuf != pAC->GIni.GP[PhysPortIndex].PMSStatus) { *pBuf = SK_MS_STAT_INDETERMINATED; } break; + + case OID_SKGE_SPEED_MODE: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pAC->GIni.GP[PhysPortIndex].PLinkSpeed; + continue; + } + + /* + * If we find an active port with a different flow + * control mode than the first one, we return a value + * that indicates that the mode is indeterminated. 
+ */ + if (*pBuf != pAC->GIni.GP[PhysPortIndex].PLinkSpeed) { + + *pBuf = SK_LSPEED_INDETERMINATED; + } + break; + + case OID_SKGE_SPEED_STATUS: + /* Check if it is the first active port */ + if (*pBuf == 0) { + + *pBuf = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed; + continue; + } + + /* + * If we find an active port with a different flow + * control status than the first one, we return a + * value that indicates that the status is + * indeterminated. + */ + if (*pBuf != pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed) { + + *pBuf = SK_LSPEED_STAT_INDETERMINATED; + } + break; } } @@ -6760,6 +6625,17 @@ case OID_SKGE_PHY_OPERATION_STATUS: *pBuf = SK_MS_STAT_INDETERMINATED; break; + case OID_SKGE_SPEED_CAP: + *pBuf = SK_LSPEED_CAP_INDETERMINATED; + break; + + case OID_SKGE_SPEED_MODE: + *pBuf = SK_LSPEED_INDETERMINATED; + break; + + case OID_SKGE_SPEED_STATUS: + *pBuf = SK_LSPEED_STAT_INDETERMINATED; + break; } } } @@ -6779,8 +6655,7 @@ * Returns: * Link status of physical port */ - -static SK_U8 CalculateLinkStatus( +PNMI_STATIC SK_U8 CalculateLinkStatus( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ unsigned int PhysPortIndex) /* Physical port index */ @@ -6820,8 +6695,7 @@ * Returns: * The link mode status */ - -static SK_U8 CalculateLinkModeStatus( +PNMI_STATIC SK_U8 CalculateLinkModeStatus( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ unsigned int PhysPortIndex) /* Physical port index */ @@ -6869,8 +6743,7 @@ * SK_PNMI_ERR_OK Task successfully performed. * SK_PNMI_ERR_GENERAL Something went wrong. */ - -static int GetVpdKeyArr( +PNMI_STATIC int GetVpdKeyArr( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ char *pKeyArr, /* Ptr KeyArray */ @@ -6966,8 +6839,7 @@ * SK_PNMI_ERR_OK Task successfully performed. * SK_PNMI_ERR_GENERAL Something went wrong. */ - -static int SirqUpdate( +PNMI_STATIC int SirqUpdate( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC) /* IO context handle */ { @@ -7006,8 +6878,7 @@ * SK_PNMI_ERR_OK Task successfully performed. * SK_PNMI_ERR_GENERAL Something went wrong. */ - -static int RlmtUpdate( +PNMI_STATIC int RlmtUpdate( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ @@ -7042,24 +6913,20 @@ * * Description: * The XMAC holds its statistic internally. To obtain the current - * values we must send a command so that the statistic data will - * be written to a predefined memory area on the adapter. + * values we send a command so that the statistic data will + * be written to apredefined memory area on the adapter. * * Returns: * SK_PNMI_ERR_OK Task successfully performed. * SK_PNMI_ERR_GENERAL Something went wrong. */ - -static int MacUpdate( +PNMI_STATIC int MacUpdate( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ unsigned int FirstMac, /* Index of the first Mac to be updated */ unsigned int LastMac) /* Index of the last Mac to be updated */ { unsigned int MacIndex; - SK_U16 StatReg; - unsigned int WaitIndex; - /* * Were the statistics already updated during the @@ -7070,31 +6937,21 @@ return (SK_PNMI_ERR_OK); } - /* Send an update command to all XMACs specified */ + /* Send an update command to all MACs specified */ for (MacIndex = FirstMac; MacIndex <= LastMac; MacIndex ++) { - StatReg = XM_SC_SNP_TXC | XM_SC_SNP_RXC; - XM_OUT16(IoC, MacIndex, XM_STAT_CMD, StatReg); - /* - * It is an auto-clearing register. 
If the command bits - * went to zero again, the statistics are transfered. - * Normally the command should be executed immediately. - * But just to be sure we execute a loop. + * 2002-09-13 pweber: Freeze the current sw counters. + * (That should be done as close as + * possible to the update of the + * hw counters) */ - for (WaitIndex = 0; WaitIndex < 10; WaitIndex ++) { - - XM_IN16(IoC, MacIndex, XM_STAT_CMD, &StatReg); - if ((StatReg & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) == - 0) { - - break; - } + if (pAC->GIni.GIMacType == SK_MAC_XMAC) { + pAC->Pnmi.BufPort[MacIndex] = pAC->Pnmi.Port[MacIndex]; } - if (WaitIndex == 10 ) { - - SK_ERR_LOG(pAC, SK_ERRCL_HW, SK_PNMI_ERR050, - SK_PNMI_ERR050MSG); + + /* 2002-09-13 pweber: Update the hw counter */ + if (pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, MacIndex) != 0) { return (SK_PNMI_ERR_GENERAL); } @@ -7119,8 +6976,7 @@ * Returns: * Requested statistic value */ - -static SK_U64 GetStatVal( +PNMI_STATIC SK_U64 GetStatVal( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ unsigned int LogPortIndex, /* Index of the logical Port to be processed */ @@ -7129,16 +6985,15 @@ { unsigned int PhysPortIndex; unsigned int PhysPortMax; - SK_U64 Val = 0; + SK_U64 Val = 0; - if(pAC->Pnmi.DualNetActiveFlag == SK_TRUE){ /* Dual net mode */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */ PhysPortIndex = NetIndex; Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); - } /* end of dual net mode */ - - else { /* single net mode */ + } + else { /* Single Net mode */ if (LogPortIndex == 0) { @@ -7163,7 +7018,7 @@ PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex); Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex); } - } /* end of single net mode */ + } return (Val); } @@ -7183,52 +7038,261 @@ * Returns: * Counter value */ - -static SK_U64 GetPhysStatVal( +PNMI_STATIC SK_U64 GetPhysStatVal( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ unsigned int PhysPortIndex, /* Index of the logical Port to be processed */ unsigned int StatIndex) /* Index to statistic value */ { - SK_U64 Val = 0; - SK_U32 LowVal; - SK_U32 HighVal; - + SK_U64 Val = 0; + SK_U32 LowVal = 0; + SK_U32 HighVal = 0; + SK_U16 Word; + int MacType; + + SK_PNMI_PORT *pPnmiPrt; + SK_GEMACFUNC *pFnMac; + + MacType = pAC->GIni.GIMacType; + + /* 2002-09-17 pweber: For XMAC, use the frozen sw counters (BufPort) */ + if (pAC->GIni.GIMacType == SK_MAC_XMAC) { + pPnmiPrt = &pAC->Pnmi.BufPort[PhysPortIndex]; + } + else { + pPnmiPrt = &pAC->Pnmi.Port[PhysPortIndex]; + } + + pFnMac = &pAC->GIni.GIFunc; switch (StatIndex) { - - case SK_PNMI_HTX_OCTET: - XM_IN32(IoC, PhysPortIndex, XM_TXO_OK_LO, &LowVal); - XM_IN32(IoC, PhysPortIndex, XM_TXO_OK_HI, &HighVal); + case SK_PNMI_HTX: + case SK_PNMI_HRX: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; break; + case SK_PNMI_HTX_OCTET: case SK_PNMI_HRX_OCTET: - XM_IN32(IoC, PhysPortIndex, XM_RXO_OK_LO, &LowVal); - XM_IN32(IoC, PhysPortIndex, XM_RXO_OK_HI, &HighVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &HighVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex + 1][MacType].Reg, + &LowVal); break; - case SK_PNMI_HTX_OCTETLOW: - case SK_PNMI_HRX_OCTETLOW: - return (Val); + case SK_PNMI_HTX_BURST: + case 
SK_PNMI_HTX_EXCESS_DEF: + case SK_PNMI_HTX_CARRIER: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + return (Val); + } - case SK_PNMI_HTX_SYNC: - LowVal = (SK_U32)pAC->Pnmi.Port[PhysPortIndex].StatSyncCts; - HighVal = (SK_U32) - (pAC->Pnmi.Port[PhysPortIndex].StatSyncCts >> 32); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; break; - case SK_PNMI_HTX_SYNC_OCTET: - LowVal = (SK_U32)pAC->Pnmi.Port[PhysPortIndex]. - StatSyncOctetsCts; - HighVal = (SK_U32) - (pAC->Pnmi.Port[PhysPortIndex].StatSyncOctetsCts >> - 32); + case SK_PNMI_HTX_MACC: + /* GMAC only supports PAUSE MAC control frames */ + if (MacType == SK_MAC_GMAC) { + Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, SK_PNMI_HTX_PMACC); + + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HTX_COL: + case SK_PNMI_HRX_UNDERSIZE: + /* Not supported by XMAC */ + if (MacType == SK_MAC_XMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + + + case SK_PNMI_HTX_DEFFERAL: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + return (Val); + } + + /* + * XMAC counts frames with deferred transmission + * even in full-duplex mode. + * + * In full-duplex mode the counter remains constant! + */ + if ((pAC->GIni.GP[PhysPortIndex].PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) || + (pAC->GIni.GP[PhysPortIndex].PLinkModeStatus == SK_LMODE_STAT_FULL)) { + + LowVal = 0; + HighVal = 0; + } + else { + /* Otherwise get contents of hardware register. */ + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[SK_PNMI_HTX_DEFFERAL][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + } + break; + + case SK_PNMI_HRX_BADOCTET: + /* Not supported by XMAC */ + if (MacType == SK_MAC_XMAC) { + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &HighVal); + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex + 1][MacType].Reg, + &LowVal); break; + case SK_PNMI_HTX_OCTETLOW: + case SK_PNMI_HRX_OCTETLOW: + case SK_PNMI_HRX_BADOCTETLOW: + return (Val); + case SK_PNMI_HRX_LONGFRAMES: - LowVal = (SK_U32)pAC->Pnmi.Port[PhysPortIndex].StatRxLongFrameCts; - HighVal = (SK_U32) - (pAC->Pnmi.Port[PhysPortIndex].StatRxLongFrameCts >> 32); + /* For XMAC the SW counter is managed by PNMI */ + if (MacType == SK_MAC_XMAC) { + return (pPnmiPrt->StatRxLongFrameCts); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HRX_TOO_LONG: + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + + Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); + + switch (MacType) { + case SK_MAC_GMAC: + /* For GMAC the SW counter is additionally managed by PNMI */ + Val += pPnmiPrt->StatRxFrameTooLongCts; + break; + + case SK_MAC_XMAC: + /* + * Frames longer than IEEE 802.3 frame max size are counted + * by XMAC in frame_too_long counter even reception of long + * frames was enabled and the frame was correct. 
+ * So correct the value by subtracting RxLongFrame counter. + */ + Val -= pPnmiPrt->StatRxLongFrameCts; + break; + + default: + break; + } + + LowVal = (SK_U32)Val; + HighVal = (SK_U32)(Val >> 32); + break; + + case SK_PNMI_HRX_SHORTS: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + /* GM_RXE_FRAG?? */ + return (Val); + } + + /* + * XMAC counts short frame errors even if link down (#10620) + * + * If link-down the counter remains constant + */ + if (pAC->GIni.GP[PhysPortIndex].PLinkModeStatus != SK_LMODE_STAT_UNKNOWN) { + + /* Otherwise get incremental difference */ + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + + Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); + Val -= pPnmiPrt->RxShortZeroMark; + + LowVal = (SK_U32)Val; + HighVal = (SK_U32)(Val >> 32); + } + break; + + case SK_PNMI_HRX_MACC: + case SK_PNMI_HRX_MACC_UNKWN: + case SK_PNMI_HRX_BURST: + case SK_PNMI_HRX_MISSED: + case SK_PNMI_HRX_FRAMING: + case SK_PNMI_HRX_CARRIER: + case SK_PNMI_HRX_IRLENGTH: + case SK_PNMI_HRX_SYMBOL: + case SK_PNMI_HRX_CEXT: + /* Not supported by GMAC */ + if (MacType == SK_MAC_GMAC) { + /* GM_RXE_FRAG?? */ + return (Val); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + case SK_PNMI_HRX_PMACC_ERR: + /* For GMAC the SW counter is managed by PNMI */ + if (MacType == SK_MAC_GMAC) { + return (pPnmiPrt->StatRxPMaccErr); + } + + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; + break; + + /* SW counter managed by PNMI */ + case SK_PNMI_HTX_SYNC: + LowVal = (SK_U32)pPnmiPrt->StatSyncCts; + HighVal = (SK_U32)(pPnmiPrt->StatSyncCts >> 32); + break; + + /* SW counter managed by PNMI */ + case SK_PNMI_HTX_SYNC_OCTET: + LowVal = (SK_U32)pPnmiPrt->StatSyncOctetsCts; + HighVal = (SK_U32)(pPnmiPrt->StatSyncOctetsCts >> 32); break; case SK_PNMI_HRX_FCS: @@ -7240,30 +7304,33 @@ /* do not read while not initialized (PHY_READ hangs!)*/ if (pAC->GIni.GP[PhysPortIndex].PState) { PHY_READ(IoC, &pAC->GIni.GP[PhysPortIndex], - PhysPortIndex, PHY_BCOM_RE_CTR, - &LowVal); - } - else { - LowVal = 0; + PhysPortIndex, PHY_BCOM_RE_CTR, + &Word); + + LowVal = Word; } - HighVal = pAC->Pnmi.Port[PhysPortIndex].CounterHigh[StatIndex]; + HighVal = pPnmiPrt->CounterHigh[StatIndex]; } else { - XM_IN32(IoC, PhysPortIndex, - StatAddress[StatIndex].Param, &LowVal); - HighVal = pAC->Pnmi.Port[PhysPortIndex].CounterHigh[StatIndex]; + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; } + break; + default: - XM_IN32(IoC, PhysPortIndex, StatAddress[StatIndex].Param, - &LowVal); - HighVal = pAC->Pnmi.Port[PhysPortIndex].CounterHigh[StatIndex]; + (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex, + StatAddr[StatIndex][MacType].Reg, + &LowVal); + HighVal = pPnmiPrt->CounterHigh[StatIndex]; break; } Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal); /* Correct value because of possible XMAC reset. 
XMAC Errata #2 */ - Val += pAC->Pnmi.Port[PhysPortIndex].CounterOffset[StatIndex]; + Val += pPnmiPrt->CounterOffset[StatIndex]; return (Val); } @@ -7279,8 +7346,7 @@ * Returns: * Nothing */ - -static void ResetCounter( +PNMI_STATIC void ResetCounter( SK_AC *pAC, /* Pointer to adapter context */ SK_IOC IoC, /* IO context handle */ SK_U32 NetIndex) @@ -7305,7 +7371,8 @@ /* Notify CSUM module */ #ifdef SK_USE_CSUM - EventParam.Para64 = (SK_U64)(-1); + EventParam.Para32[0] = NetIndex; + EventParam.Para32[1] = (SK_U32)-1; SkEventQueue(pAC, SKGE_CSUM, SK_CSUM_EVENT_CLEAR_PROTO_STATS, EventParam); #endif @@ -7314,11 +7381,7 @@ for (PhysPortIndex = 0; PhysPortIndex < (unsigned int)pAC->GIni.GIMacsFound; PhysPortIndex ++) { - XM_OUT16(IoC, PhysPortIndex, XM_STAT_CMD, - XM_SC_CLR_RXC | XM_SC_CLR_TXC); - /* Clear two times according to Errata #3 */ - XM_OUT16(IoC, PhysPortIndex, XM_STAT_CMD, - XM_SC_CLR_RXC | XM_SC_CLR_TXC); + (void)pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PhysPortIndex); SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].CounterHigh, 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].CounterHigh)); @@ -7333,6 +7396,12 @@ SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. StatRxLongFrameCts, 0, sizeof(pAC->Pnmi.Port[ PhysPortIndex].StatRxLongFrameCts)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + StatRxFrameTooLongCts, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].StatRxFrameTooLongCts)); + SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex]. + StatRxPMaccErr, 0, sizeof(pAC->Pnmi.Port[ + PhysPortIndex].StatRxPMaccErr)); } /* @@ -7373,8 +7442,7 @@ * Returns: * A pointer to the trap entry */ - -static char* GetTrapEntry( +PNMI_STATIC char* GetTrapEntry( SK_AC *pAC, /* Pointer to adapter context */ SK_U32 TrapId, /* SNMP ID of the trap */ unsigned int Size) /* Space needed for trap entry */ @@ -7399,11 +7467,11 @@ if (Beg >= Size) { NeededSpace = Size; - Wrap = FALSE; + Wrap = SK_FALSE; } else { NeededSpace = Beg + Size; - Wrap = TRUE; + Wrap = SK_TRUE; } /* @@ -7479,8 +7547,7 @@ * Returns: * Nothing */ - -static void CopyTrapQueue( +PNMI_STATIC void CopyTrapQueue( SK_AC *pAC, /* Pointer to adapter context */ char *pDstBuf) /* Buffer to which the queued traps will be copied */ { @@ -7523,8 +7590,7 @@ * Returns: * Nothing */ - -static void GetTrapQueueLen( +PNMI_STATIC void GetTrapQueueLen( SK_AC *pAC, /* Pointer to adapter context */ unsigned int *pLen, /* Length in Bytes of all queued traps */ unsigned int *pEntries) /* Returns number of trapes stored in queue */ @@ -7566,8 +7632,7 @@ * Returns: * Nothing */ - -static void QueueSimpleTrap( +PNMI_STATIC void QueueSimpleTrap( SK_AC *pAC, /* Pointer to adapter context */ SK_U32 TrapId) /* Type of sensor trap */ { @@ -7585,8 +7650,7 @@ * Returns: * Nothing */ - -static void QueueSensorTrap( +PNMI_STATIC void QueueSensorTrap( SK_AC *pAC, /* Pointer to adapter context */ SK_U32 TrapId, /* Type of sensor trap */ unsigned int SensorIndex) /* Index of sensor which caused the trap */ @@ -7641,8 +7705,7 @@ * Returns: * Nothing */ - -static void QueueRlmtNewMacTrap( +PNMI_STATIC void QueueRlmtNewMacTrap( SK_AC *pAC, /* Pointer to adapter context */ unsigned int ActiveMac) /* Index (0..n) of the currently active port */ { @@ -7669,8 +7732,7 @@ * Returns: * Nothing */ - -static void QueueRlmtPortTrap( +PNMI_STATIC void QueueRlmtPortTrap( SK_AC *pAC, /* Pointer to adapter context */ SK_U32 TrapId, /* Type of RLMT port trap */ unsigned int PortIndex) /* Index of the port, which changed its state */ @@ -7697,8 +7759,7 @@ * Returns: * Nothing */ - -static 
void CopyMac( +PNMI_STATIC void CopyMac( char *pDst, /* Pointer to destination buffer */ SK_MAC_ADDR *pMac) /* Pointer of Source */ { @@ -7710,3 +7771,536 @@ *(pDst + i) = pMac->a[i]; } } + + +#ifdef SK_POWER_MGMT +/***************************************************************************** + * + * PowerManagement - OID handler function of PowerManagement OIDs + * + * Description: + * The code is simple. No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was successfully performed. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter. + */ + +PNMI_STATIC int PowerManagement( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* Get/PreSet/Set action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer to which to mgmt data will be retrieved */ +unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode allways zero */ +{ + + SK_U32 RetCode = SK_PNMI_ERR_GENERAL; + + /* + * Check instance. We only handle single instance variables + */ + if (Instance != (SK_U32)(-1) && Instance != 1) { + + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + /* + * Perform action + */ + if (Action == SK_PNMI_GET) { + + /* + * Check length + */ + switch (Id) { + + case OID_PNP_CAPABILITIES: + if (*pLen < sizeof(SK_PNP_CAPABILITIES)) { + + *pLen = sizeof(SK_PNP_CAPABILITIES); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_PNP_QUERY_POWER: + case OID_PNP_ENABLE_WAKE_UP: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_PNP_SET_POWER: + case OID_PNP_ADD_WAKE_UP_PATTERN: + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + break; + + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, + SK_PNMI_ERR040MSG); + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Get value + */ + switch (Id) { + + case OID_PNP_CAPABILITIES: + RetCode = SkPowerQueryPnPCapabilities(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_QUERY_POWER: + /* The Windows DDK describes: An OID_PNP_QUERY_POWER requests + the miniport to indicate whether it can transition its NIC + to the low-power state. + A miniport driver must always return NDIS_STATUS_SUCCESS + to a query of OID_PNP_QUERY_POWER. */ + RetCode = SK_PNMI_ERR_OK; + break; + + /* NDIS handles these OIDs as write-only. + * So in case of get action the buffer with written length = 0 + * is returned + */ + case OID_PNP_SET_POWER: + case OID_PNP_ADD_WAKE_UP_PATTERN: + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + *pLen = 0; + RetCode = SK_PNMI_ERR_OK; + break; + + case OID_PNP_ENABLE_WAKE_UP: + RetCode = SkPowerGetEnableWakeUp(pAC, IoC, pBuf, pLen); + break; + + default: + RetCode = SK_PNMI_ERR_GENERAL; + break; + } + + return (RetCode); + } + + /* + * From here SET or PRESET action. Check if the passed + * buffer length is plausible. 
+ */ + switch (Id) { + case OID_PNP_SET_POWER: + case OID_PNP_ENABLE_WAKE_UP: + if (*pLen < sizeof(SK_U32)) { + + *pLen = sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + if (*pLen != sizeof(SK_U32)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + break; + + case OID_PNP_ADD_WAKE_UP_PATTERN: + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + if (*pLen < sizeof(SK_PM_PACKET_PATTERN)) { + + *pLen = 0; + return (SK_PNMI_ERR_BAD_VALUE); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_READ_ONLY); + } + + /* + * Perform preset or set + */ + + /* POWER module does not support PRESET action */ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + switch (Id) { + case OID_PNP_SET_POWER: + RetCode = SkPowerSetPower(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_ADD_WAKE_UP_PATTERN: + RetCode = SkPowerAddWakeUpPattern(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_REMOVE_WAKE_UP_PATTERN: + RetCode = SkPowerRemoveWakeUpPattern(pAC, IoC, pBuf, pLen); + break; + + case OID_PNP_ENABLE_WAKE_UP: + RetCode = SkPowerSetEnableWakeUp(pAC, IoC, pBuf, pLen); + break; + + default: + RetCode = SK_PNMI_ERR_GENERAL; + } + + return (RetCode); +} +#endif /* SK_POWER_MGMT */ + + +/***************************************************************************** + * + * Vct - OID handler function of OIDs + * + * Description: + * The code is simple. No description necessary. + * + * Returns: + * SK_PNMI_ERR_OK The request was performed successfully. + * SK_PNMI_ERR_GENERAL A general severe internal error occured. + * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain + * the correct data (e.g. a 32bit value is + * needed, but a 16 bit value was passed). + * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't + * exist (e.g. port instance 3 on a two port + * adapter). + * SK_PNMI_ERR_READ_ONLY Only the Get action is allowed. + * + */ + +PNMI_STATIC int Vct( +SK_AC *pAC, /* Pointer to adapter context */ +SK_IOC IoC, /* IO context handle */ +int Action, /* Get/PreSet/Set action */ +SK_U32 Id, /* Object ID that is to be processed */ +char *pBuf, /* Buffer to which the mgmt data will be copied */ +unsigned int *pLen, /* On call: buffer length. On return: used buffer */ +SK_U32 Instance, /* Instance (-1,2..n) that is to be queried */ +unsigned int TableIndex, /* Index to the Id table */ +SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */ +{ + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctBackupData; + SK_U32 LogPortMax; + SK_U32 PhysPortMax; + SK_U32 PhysPortIndex; + SK_U32 Limit; + SK_U32 Offset; + + SK_BOOL Link; + SK_U32 RetCode = SK_PNMI_ERR_GENERAL; + int i; + SK_EVPARA Para; + SK_U32 CableLength; + + /* + * Calculate the port indexes from the instance. + */ + PhysPortMax = pAC->GIni.GIMacsFound; + LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax); + + /* Dual net mode? */ + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + LogPortMax--; + } + + if ((Instance != (SK_U32) (-1))) { + /* Check instance range. */ + if ((Instance < 2) || (Instance > LogPortMax)) { + *pLen = 0; + return (SK_PNMI_ERR_UNKNOWN_INST); + } + + if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { + PhysPortIndex = NetIndex; + } + else { + PhysPortIndex = Instance - 2; + } + Limit = PhysPortIndex + 1; + } + else { /* + * Instance == (SK_U32) (-1), get all Instances of that OID. + * + * Not implemented yet. May be used in future releases. 
+ */ + PhysPortIndex = 0; + Limit = PhysPortMax; + } + + pPrt = &pAC->GIni.GP[PhysPortIndex]; + if (pPrt->PHWLinkUp) { + Link = SK_TRUE; + } + else { + Link = SK_FALSE; + } + + /* + * Check MAC type. + */ + if (pPrt->PhyType != SK_PHY_MARV_COPPER) { + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* Initialize backup data pointer. */ + pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex]; + + /* + * Check action type. + */ + if (Action == SK_PNMI_GET) { + /* + * Check length. + */ + switch (Id) { + + case OID_SKGE_VCT_GET: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT)) { + *pLen = (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + case OID_SKGE_VCT_STATUS: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U8)) { + *pLen = (Limit - PhysPortIndex) * sizeof(SK_U8); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Get value. + */ + Offset = 0; + for (; PhysPortIndex < Limit; PhysPortIndex++) { + switch (Id) { + + case OID_SKGE_VCT_GET: + if ((Link == SK_FALSE) && + (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING)) { + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 0) { + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] |= + (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE); + + /* Copy results for later use to PNMI struct. */ + for (i = 0; i < 4; i++) { + if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) { + if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] < 0xff)) { + pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH; + } + } + if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] != 0xff)) { + CableLength = 1000 * (((175 * pPrt->PMdiPairLen[i]) / 210) - 28); + } + else { + CableLength = 0; + } + pVctBackupData->PMdiPairLen[i] = CableLength; + pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i]; + } + + Para.Para32[0] = PhysPortIndex; + Para.Para32[1] = -1; + SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para); + SkEventDispatcher(pAC, IoC); + } + else { + ; /* VCT test is running. */ + } + } + + /* Get all results. */ + CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex); + Offset += sizeof(SK_U8); + *(pBuf + Offset) = pPrt->PCableLen; + Offset += sizeof(SK_U8); + for (i = 0; i < 4; i++) { + SK_PNMI_STORE_U32((pBuf + Offset), pVctBackupData->PMdiPairLen[i]); + Offset += sizeof(SK_U32); + } + for (i = 0; i < 4; i++) { + *(pBuf + Offset) = pVctBackupData->PMdiPairSts[i]; + Offset += sizeof(SK_U8); + } + + RetCode = SK_PNMI_ERR_OK; + break; + + case OID_SKGE_VCT_STATUS: + CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex); + Offset += sizeof(SK_U8); + RetCode = SK_PNMI_ERR_OK; + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } /* for */ + *pLen = Offset; + return (RetCode); + + } /* if SK_PNMI_GET */ + + /* + * From here SET or PRESET action. Check if the passed + * buffer length is plausible. + */ + + /* + * Check length. + */ + switch (Id) { + case OID_SKGE_VCT_SET: + if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) { + *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32); + return (SK_PNMI_ERR_TOO_SHORT); + } + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + + /* + * Perform preset or set. + */ + + /* VCT does not support PRESET action. 
*/ + if (Action == SK_PNMI_PRESET) { + return (SK_PNMI_ERR_OK); + } + + Offset = 0; + for (; PhysPortIndex < Limit; PhysPortIndex++) { + switch (Id) { + case OID_SKGE_VCT_SET: /* Start VCT test. */ + if (Link == SK_FALSE) { + SkGeStopPort(pAC, IoC, PhysPortIndex, SK_STOP_ALL, SK_SOFT_RST); + + RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_TRUE); + if (RetCode == 0) { /* RetCode: 0 => Start! */ + pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_PENDING; + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_NEW_VCT_DATA; + pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_LINK; + + /* + * Start VCT timer counter. + */ + SK_MEMSET((char *) &Para, 0, sizeof(Para)); + Para.Para32[0] = PhysPortIndex; + Para.Para32[1] = -1; + SkTimerStart(pAC, IoC, &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer, + 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Para); + SK_PNMI_STORE_U32((pBuf + Offset), RetCode); + RetCode = SK_PNMI_ERR_OK; + } + else { /* RetCode: 2 => Running! */ + SK_PNMI_STORE_U32((pBuf + Offset), RetCode); + RetCode = SK_PNMI_ERR_OK; + } + } + else { /* RetCode: 4 => Link! */ + RetCode = 4; + SK_PNMI_STORE_U32((pBuf + Offset), RetCode); + RetCode = SK_PNMI_ERR_OK; + } + Offset += sizeof(SK_U32); + break; + + default: + *pLen = 0; + return (SK_PNMI_ERR_GENERAL); + } + } /* for */ + *pLen = Offset; + return (RetCode); + +} /* Vct */ + + +PNMI_STATIC void CheckVctStatus( +SK_AC *pAC, +SK_IOC IoC, +char *pBuf, +SK_U32 Offset, +SK_U32 PhysPortIndex) +{ + SK_GEPORT *pPrt; + SK_PNMI_VCT *pVctData; + SK_U32 RetCode; + SK_U8 LinkSpeedUsed; + + pPrt = &pAC->GIni.GP[PhysPortIndex]; + + pVctData = (SK_PNMI_VCT *) (pBuf + Offset); + pVctData->VctStatus = SK_PNMI_VCT_NONE; + + if (!pPrt->PHWLinkUp) { + + /* Was a VCT test ever made before? */ + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) { + if ((pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_LINK)) { + pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA; + } + else { + pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA; + } + } + + /* Check VCT test status. */ + RetCode = SkGmCableDiagStatus(pAC,IoC, PhysPortIndex, SK_FALSE); + if (RetCode == 2) { /* VCT test is running. */ + pVctData->VctStatus |= SK_PNMI_VCT_RUNNING; + } + else { /* VCT data was copied to pAC here. Check PENDING state. */ + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) { + pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA; + } + } + + if (pPrt->PCableLen != 0xff) { /* Old DSP value. */ + pVctData->VctStatus |= SK_PNMI_VCT_OLD_DSP_DATA; + } + } + else { + + /* Was a VCT test ever made before? */ + if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) { + pVctData->VctStatus &= ~SK_PNMI_VCT_NEW_VCT_DATA; + pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA; + } + + /* DSP only valid in 100/1000 modes. 
*/ + LinkSpeedUsed = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed; + if (LinkSpeedUsed != SK_LSPEED_STAT_10MBPS) { + pVctData->VctStatus |= SK_PNMI_VCT_NEW_DSP_DATA; + } + } + +} /* CheckVctStatus */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skgesirq.c linux.21pre4-ac2/drivers/net/sk98lin/skgesirq.c --- linux.21pre4/drivers/net/sk98lin/skgesirq.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skgesirq.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skgesirq.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.65 $ - * Date: $Date: 2001/02/23 13:41:51 $ + * Version: $Revision: 1.81 $ + * Date: $Date: 2002/12/05 10:49:51 $ * Purpose: Special IRQ module * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2000 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,89 @@ * History: * * $Log: skgesirq.c,v $ + * Revision 1.81 2002/12/05 10:49:51 rschmidt + * Fixed missing Link Down Event for fiber (Bug Id #10768) + * Added reading of cable length when link is up + * Removed testing of unused error bits in PHY ISR + * Editorial changes. + * + * Revision 1.80 2002/11/12 17:15:21 rschmidt + * Replaced SkPnmiGetVar() by ...MacStatistic() in SkMacParity(). + * Editorial changes. + * + * Revision 1.79 2002/10/14 15:14:51 rschmidt + * Changed clearing of IS_M1_PAR_ERR (MAC 1 Parity Error) in + * SkMacParity() depending on GIChipRev (HW-Bug #8). + * Added error messages for GPHY Auto-Negotiation Error and + * FIFO Overflow/Underrun in SkPhyIsrGmac(). + * Editorial changes. + * + * Revision 1.78 2002/10/10 15:54:29 mkarl + * changes for PLinkSpeedUsed + * + * Revision 1.77 2002/09/12 08:58:51 rwahl + * Retrieve counters needed for XMAC errata workarounds directly because + * PNMI returns corrected counter values (e.g. #10620). + * + * Revision 1.76 2002/08/16 15:21:54 rschmidt + * Replaced all if(GIChipId == CHIP_ID_GENESIS) with new entry GIGenesis. + * Replaced wrong 1st para pAC with IoC in SK_IN/OUT macros. + * Editorial changes. + * + * Revision 1.75 2002/08/12 13:50:47 rschmidt + * Changed clearing of IS_M1_PAR_ERR (MAC 1 Parity Error) in + * SkMacParity() by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE (HW-Bug #8). + * Added clearing of IS_IRQ_TIST_OV and IS_IRQ_SENSOR in SkGeHwErr(). + * Corrected handling of Link Up and Auto-Negotiation Over for GPHY. + * in SkGePortCheckUpGmac(). + * Editorial changes. + * + * Revision 1.74 2002/08/08 16:17:04 rschmidt + * Added PhyType check for SK_HWEV_SET_ROLE event (copper only) + * Changed Link Up check reading PHY Specific Status (YUKON) + * Editorial changes + * + * Revision 1.73 2002/07/15 18:36:53 rwahl + * Editorial changes. + * + * Revision 1.72 2002/07/15 15:46:26 rschmidt + * Added new event: SK_HWEV_SET_SPEED + * Editorial changes + * + * Revision 1.71 2002/06/10 09:34:19 rschmidt + * Editorial changes + * + * Revision 1.70 2002/06/05 08:29:18 rschmidt + * SkXmRxTxEnable() replaced by SkMacRxTxEnable(). + * Editorial changes. + * + * Revision 1.69 2002/04/25 13:03:49 rschmidt + * Changes for handling YUKON. + * Use of #ifdef OTHER_PHY to eliminate code for unused Phy types. 
+ * Replaced all XMAC-access macros by functions: SkMacRxTxDisable(), + * SkMacIrqDisable(). + * Added handling for GMAC FIFO in SkMacParity(). + * Replaced all SkXm...() functions with SkMac...() to handle also + * YUKON's GMAC. + * Macros for XMAC PHY access PHY_READ(), PHY_WRITE() replaced + * by functions SkXmPhyRead(), SkXmPhyWrite(). + * Disabling all PHY interrupts moved to SkMacIrqDisable(). + * Added handling for GPHY IRQ in SkGeSirqIsr(). + * Removed status parameter from MAC IRQ handler SkMacIrq(). + * Added SkGePortCheckUpGmac(), SkPhyIsrGmac() for GMAC. + * Editorial changes + * + * Revision 1.68 2002/02/26 15:24:53 rwahl + * Fix: no link with manual configuration (#10673). The previous fix for + * #10639 was removed. So for RLMT mode = CLS the RLMT may switch to + * misconfigured port. It should not occur for the other RLMT modes. + * + * Revision 1.67 2001/11/20 09:19:58 rwahl + * Reworked bugfix #10639 (no dependency to RLMT mode). + * + * Revision 1.66 2001/10/26 07:52:53 afischer + * Port switching bug in `check local link` mode + * * Revision 1.65 2001/02/23 13:41:51 gklug * fix: PHYS2INST should be used correctly for Dual Net operation * chg: do no longer work with older PNMI @@ -254,49 +337,53 @@ * * In the ISR of the driver the bits for frame transmission complete and * for receive complete are checked and handled by the driver itself. - * The bits of the slow path mask are checked after this and then the - * entry into the so-called "slow path" is prepared. It is an implemetors + * The bits of the slow path mask are checked after that and then the + * entry into the so-called "slow path" is prepared. It is an implementors * decision whether this is executed directly or just scheduled by - * disabling the mask. In the interrupt service routine events may be + * disabling the mask. In the interrupt service routine some events may be * generated, so it would be a good idea to call the EventDispatcher * right after this ISR. * - * The Interrupt service register of the adapter is NOT read by this - * module. SO if the drivers implemetor needs a while loop around the - * slow data paths Interrupt bits, he needs to call the SkGeIsr() for + * The Interrupt source register of the adapter is NOT read by this module. + * SO if the drivers implementor needs a while loop around the + * slow data paths interrupt bits, he needs to call the SkGeSirqIsr() for * each loop entered. * - * However, the XMAC Interrupt status registers are read in a while loop. + * However, the MAC Interrupt status registers are read in a while loop. * */ - + static const char SysKonnectFileId[] = - "$Id: skgesirq.c,v 1.65 2001/02/23 13:41:51 gklug Exp $" ; + "$Id: skgesirq.c,v 1.81 2002/12/05 10:49:51 rschmidt Exp $" ; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skgepnmi.h" /* PNMI Definitions */ #include "h/skrlmt.h" /* RLMT Definitions */ -#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */ +#include "h/skdrv2nd.h" /* Adapter Control and Driver specific Def. 
*/ /* local function prototypes */ static int SkGePortCheckUpXmac(SK_AC*, SK_IOC, int); static int SkGePortCheckUpBcom(SK_AC*, SK_IOC, int); +static int SkGePortCheckUpGmac(SK_AC*, SK_IOC, int); +static void SkPhyIsrBcom(SK_AC*, SK_IOC, int, SK_U16); +static void SkPhyIsrGmac(SK_AC*, SK_IOC, int, SK_U16); +#ifdef OTHER_PHY static int SkGePortCheckUpLone(SK_AC*, SK_IOC, int); static int SkGePortCheckUpNat(SK_AC*, SK_IOC, int); -static void SkPhyIsrBcom(SK_AC*, SK_IOC, int, SK_U16); static void SkPhyIsrLone(SK_AC*, SK_IOC, int, SK_U16); +#endif /* OTHER_PHY */ /* - * Define an array of RX counter which are checked - * in AutoSense mode to check whether a link is not able to autonegotiate. + * array of Rx counter from XMAC which are checked + * in AutoSense mode to check whether a link is not able to auto-negotiate. */ -static const SK_U32 SkGeRxOids[]= { - OID_SKGE_STAT_RX_64, - OID_SKGE_STAT_RX_127, - OID_SKGE_STAT_RX_255, - OID_SKGE_STAT_RX_511, - OID_SKGE_STAT_RX_1023, - OID_SKGE_STAT_RX_MAX, +static const SK_U16 SkGeRxRegs[]= { + XM_RXF_64B, + XM_RXF_127B, + XM_RXF_255B, + XM_RXF_511B, + XM_RXF_1023B, + XM_RXF_MAX_SZ } ; #ifdef __C2MAN__ @@ -310,7 +397,7 @@ {} #endif -/* Define return codes of SkGePortCheckUp and CheckShort. */ +/* Define return codes of SkGePortCheckUp and CheckShort */ #define SK_HW_PS_NONE 0 /* No action needed */ #define SK_HW_PS_RESTART 1 /* Restart needed */ #define SK_HW_PS_LINK 2 /* Link Up actions needed */ @@ -320,17 +407,17 @@ * SkHWInitDefSense() - Default Autosensing mode initialization * * Description: - * This function handles the Hardware link down signal + * This function sets the PLinkMode for HWInit * * Note: * */ -void SkHWInitDefSense( +static void SkHWInitDefSense( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* Port Index (MAC_1 + n) */ +int Port) /* Port Index (MAC_1 + n) */ { - SK_GEPORT *pPrt; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ pPrt = &pAC->GIni.GP[Port]; @@ -343,8 +430,7 @@ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("AutoSensing: First mode %d on Port %d\n", - (int)SK_LMODE_AUTOFULL, - Port)); + (int)SK_LMODE_AUTOFULL, Port)); pPrt->PLinkMode = SK_LMODE_AUTOFULL; @@ -362,12 +448,12 @@ * Note: * */ -SK_U8 SkHWSenseGetNext( +SK_U8 SkHWSenseGetNext( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* Port Index (MAC_1 + n) */ +int Port) /* Port Index (MAC_1 + n) */ { - SK_GEPORT *pPrt; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ pPrt = &pAC->GIni.GP[Port]; @@ -375,16 +461,16 @@ if (pPrt->PLinkModeConf != SK_LMODE_AUTOSENSE) { /* Leave all as configured */ - return (pPrt->PLinkModeConf); + return(pPrt->PLinkModeConf); } if (pPrt->PLinkMode == SK_LMODE_AUTOFULL) { /* Return next mode AUTOBOTH */ - return (SK_LMODE_AUTOBOTH); + return(SK_LMODE_AUTOBOTH); } /* Return default autofull */ - return (SK_LMODE_AUTOFULL); + return(SK_LMODE_AUTOFULL); } /* SkHWSenseGetNext */ @@ -398,13 +484,13 @@ * Note: * */ -void SkHWSenseSetNext( +void SkHWSenseSetNext( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ +int Port, /* Port Index (MAC_1 + n) */ SK_U8 NewMode) /* New Mode to be written in sense mode */ { - SK_GEPORT *pPrt; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ pPrt = &pAC->GIni.GP[Port]; @@ -415,7 +501,9 @@ } SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("AutoSensing: next mode %d on Port %d\n", (int)NewMode, Port)); + ("AutoSensing: next mode %d on Port %d\n", + (int)NewMode, Port)); + pPrt->PLinkMode = NewMode; 
return; @@ -432,73 +520,47 @@ * Note: * */ -void SkHWLinkDown( +void SkHWLinkDown( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { - SK_GEPORT *pPrt; - SK_U16 Word; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ pPrt = &pAC->GIni.GP[Port]; - /* Disable all XMAC interrupts. */ - XM_OUT16(IoC, Port, XM_IMSK, 0xffff); + /* Disable all MAC interrupts */ + SkMacIrqDisable(pAC, IoC, Port); - /* Disable Receiver and Transmitter. */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Word); - XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + /* Disable Receiver and Transmitter */ + SkMacRxTxDisable(pAC, IoC, Port); - /* Disable all PHY interrupts. */ - switch (pPrt->PhyType) { - case SK_PHY_BCOM: - /* Make sure that PHY is initialized. */ - if (pAC->GIni.GP[Port].PState) { - /* NOT allowed if BCOM is in RESET state */ - /* Workaround BCOM Errata (#10523) all BCom. */ - /* Disable Power Management if link is down. */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, &Word); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, - Word | PHY_B_AC_DIS_PM); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_INT_MASK, 0xffff); - } - break; - case SK_PHY_LONE: - PHY_WRITE(IoC, pPrt, Port, PHY_LONE_INT_ENAB, 0x0); - break; - case SK_PHY_NAT: - /* todo: National - PHY_WRITE(IoC, pPrt, Port, PHY_NAT_INT_MASK, 0xffff); */ - break; - } - - /* Init default sense mode. */ + /* Init default sense mode */ SkHWInitDefSense(pAC, IoC, Port); if (!pPrt->PHWLinkUp) { return; - } + } - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_IRQ, + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link down Port %d\n", Port)); - /* Set Link to DOWN. */ + /* Set Link to DOWN */ pPrt->PHWLinkUp = SK_FALSE; /* Reset Port stati */ pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_INDETERMINATED; - /* - * Reinit Phy especially when the AutoSense default is set now. - */ - SkXmInitPhy(pAC, IoC, Port, SK_FALSE); + /* Re-init Phy especially when the AutoSense default is set now */ + SkMacInitPhy(pAC, IoC, Port, SK_FALSE); - /* GP0: used for workaround of Rev. C Errata 2. */ + /* GP0: used for workaround of Rev. C Errata 2 */ - /* Do NOT signal to RLMT. */ + /* Do NOT signal to RLMT */ - /* Do NOT start the timer here. 
*/ + /* Do NOT start the timer here */ } /* SkHWLinkDown */ @@ -512,19 +574,19 @@ * Note: * */ -void SkHWLinkUp( +void SkHWLinkUp( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* Port Index (MAC_1 + n) */ +int Port) /* Port Index (MAC_1 + n) */ { - SK_GEPORT *pPrt; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ pPrt = &pAC->GIni.GP[Port]; if (pPrt->PHWLinkUp) { /* We do NOT need to proceed on active link */ return; - } + } pPrt->PHWLinkUp = SK_TRUE; pPrt->PAutoNegFail = SK_FALSE; @@ -533,11 +595,14 @@ if (pPrt->PLinkMode != SK_LMODE_AUTOHALF && pPrt->PLinkMode != SK_LMODE_AUTOFULL && pPrt->PLinkMode != SK_LMODE_AUTOBOTH) { - /* Link is up and no Autonegotiation should be done */ + /* Link is up and no Auto-negotiation should be done */ /* Configure Port */ + + /* link speed should be the configured one */ + pPrt->PLinkSpeedUsed = pPrt->PLinkSpeed; - /* Set Link Mode */ + /* Set Link Mode Status */ if (pPrt->PLinkMode == SK_LMODE_FULL) { pPrt->PLinkModeStatus = SK_LMODE_STAT_FULL; } @@ -545,11 +610,11 @@ pPrt->PLinkModeStatus = SK_LMODE_STAT_HALF; } - /* No flow control without autonegotiation */ + /* No flow control without auto-negotiation */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; - /* RX/TX enable */ - SkXmRxTxEnable(pAC, IoC, Port); + /* enable Rx/Tx */ + SkMacRxTxEnable(pAC, IoC, Port); } } /* SkHWLinkUp */ @@ -559,29 +624,33 @@ * SkMacParity - does everything to handle MAC parity errors correctly * */ -static void SkMacParity( +static void SkMacParity( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* Port Index of the port failed */ +int Port) /* Port Index of the port failed */ { SK_EVPARA Para; SK_GEPORT *pPrt; /* GIni Port struct pointer */ - SK_U64 TxMax; /* TxMax Counter */ - unsigned Len; + SK_U32 TxMax; /* TxMax Counter */ pPrt = &pAC->GIni.GP[Port]; - /* Clear IRQ */ - SK_OUT16(IoC, MR_ADDR(Port,TX_MFF_CTRL1), MFF_CLR_PERR); + /* Clear IRQ Tx Parity Error */ + if (pAC->GIni.GIGenesis) { + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_PERR); + } + else { + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), + (SK_U8)((pAC->GIni.GIChipRev == 0) ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE)); + } if (pPrt->PCheckPar) { if (Port == MAC_1) { - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E016, - SKERR_SIRQ_E016MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E016, SKERR_SIRQ_E016MSG); } else { - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E017, - SKERR_SIRQ_E017MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E017, SKERR_SIRQ_E017MSG); } Para.Para64 = Port; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); @@ -592,11 +661,16 @@ } /* Check whether frames with a size of 1k were sent */ - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_MAX, (char *)&TxMax, - &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), - pAC->Rlmt.Port[Port].Net->NetNumber); - + if (pAC->GIni.GIGenesis) { + /* Snap statistic counters */ + (void)SkXmUpdateStats(pAC, IoC, Port); + + (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_MAX_SZ, &TxMax); + } + else { + (void)SkGmMacStatistic(pAC, IoC, Port, GM_TXF_1518B, &TxMax); + } + if (TxMax > 0) { /* From now on check the parity */ pPrt->PCheckPar = SK_TRUE; @@ -612,7 +686,7 @@ * * Notes: */ -static void SkGeHwErr( +static void SkGeHwErr( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ SK_U32 HwStatus) /* Interrupt status word */ @@ -620,17 +694,19 @@ SK_EVPARA Para; SK_U16 Word; - if ((HwStatus & IS_IRQ_MST_ERR) || (HwStatus & IS_IRQ_STAT)) { - if (HwStatus & IS_IRQ_STAT) { - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E013, SKERR_SIRQ_E013MSG); + if ((HwStatus & (IS_IRQ_MST_ERR | IS_IRQ_STAT)) != 0) { + /* PCI Errors occured */ + if ((HwStatus & IS_IRQ_STAT) != 0) { + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E013, SKERR_SIRQ_E013MSG); } else { - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E012, SKERR_SIRQ_E012MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E012, SKERR_SIRQ_E012MSG); } /* Reset all bits in the PCI STATUS register */ - SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); SK_IN16(IoC, PCI_C(PCI_STATUS), &Word); + + SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON); SK_OUT16(IoC, PCI_C(PCI_STATUS), Word | PCI_ERRBITS); SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF); @@ -638,68 +714,82 @@ SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); } - if (HwStatus & IS_NO_STAT_M1) { - /* Ignore it */ - /* This situation is also indicated in the descriptor */ - SK_OUT16(IoC, MR_ADDR(MAC_1,RX_MFF_CTRL1), MFF_CLR_INSTAT); - } - - if (HwStatus & IS_NO_STAT_M2) { - /* Ignore it */ - /* This situation is also indicated in the descriptor */ - SK_OUT16(IoC, MR_ADDR(MAC_2,RX_MFF_CTRL1), MFF_CLR_INSTAT); - } - - if (HwStatus & IS_NO_TIST_M1) { - /* Ignore it */ - /* This situation is also indicated in the descriptor */ - SK_OUT16(IoC, MR_ADDR(MAC_1,RX_MFF_CTRL1), MFF_CLR_INTIST); + if (pAC->GIni.GIGenesis) { + if ((HwStatus & IS_NO_STAT_M1) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INSTAT); + } + + if ((HwStatus & IS_NO_STAT_M2) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INSTAT); + } + + if ((HwStatus & IS_NO_TIST_M1) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INTIST); + } + + if ((HwStatus & IS_NO_TIST_M2) != 0) { + /* Ignore it */ + /* This situation is also indicated in the descriptor */ + SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INTIST); + } } + else { /* YUKON */ + /* This is necessary only for Rx timing measurements */ + if ((HwStatus 
& IS_IRQ_TIST_OV) != 0) { + /* Clear Time Stamp Timer IRQ */ + SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_CLR_IRQ); + } - if (HwStatus & IS_NO_TIST_M2) { - /* Ignore it */ - /* This situation is also indicated in the descriptor */ - SK_OUT16(IoC, MR_ADDR(MAC_2,RX_MFF_CTRL1), MFF_CLR_INTIST); + if ((HwStatus & IS_IRQ_SENSOR) != 0) { + /* Clear I2C IRQ */ + SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); + } } - if (HwStatus & IS_RAM_RD_PAR) { + if ((HwStatus & IS_RAM_RD_PAR) != 0) { SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_RD_PERR); - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E014, SKERR_SIRQ_E014MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E014, SKERR_SIRQ_E014MSG); Para.Para64 = 0; SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); } - if (HwStatus & IS_RAM_WR_PAR) { + if ((HwStatus & IS_RAM_WR_PAR) != 0) { SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_WR_PERR); - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E015, SKERR_SIRQ_E015MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E015, SKERR_SIRQ_E015MSG); Para.Para64 = 0; SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para); } - if (HwStatus & IS_M1_PAR_ERR) { + if ((HwStatus & IS_M1_PAR_ERR) != 0) { SkMacParity(pAC, IoC, MAC_1); } - if (HwStatus & IS_M2_PAR_ERR) { + if ((HwStatus & IS_M2_PAR_ERR) != 0) { SkMacParity(pAC, IoC, MAC_2); } - if (HwStatus & IS_R1_PAR_ERR) { + if ((HwStatus & IS_R1_PAR_ERR) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_P); - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E018, SKERR_SIRQ_E018MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E018, SKERR_SIRQ_E018MSG); Para.Para64 = MAC_1; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); Para.Para32[0] = MAC_1; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - if (HwStatus & IS_R2_PAR_ERR) { + if ((HwStatus & IS_R2_PAR_ERR) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_P); - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E019, SKERR_SIRQ_E019MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E019, SKERR_SIRQ_E019MSG); Para.Para64 = MAC_2; SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para); Para.Para32[0] = MAC_2; @@ -716,126 +806,122 @@ * * Notes: */ -void SkGeSirqIsr( +void SkGeSirqIsr( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ SK_U32 Istatus) /* Interrupt status word */ { SK_EVPARA Para; - SK_U32 RegVal32; /* Read register Value */ - SK_U16 XmIsr; + SK_U32 RegVal32; /* Read register value */ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + unsigned Len; + SK_U64 Octets; + SK_U16 PhyInt; + SK_U16 PhyIMsk; + int i; - if (Istatus & IS_HW_ERR) { + if ((Istatus & IS_HW_ERR) != 0) { + /* read the HW Error Interrupt source */ SK_IN32(IoC, B0_HWE_ISRC, &RegVal32); + SkGeHwErr(pAC, IoC, RegVal32); } /* * Packet Timeout interrupts */ - /* Check whether XMACs are correctly initialized */ - if ((Istatus & (IS_PA_TO_RX1 | IS_PA_TO_TX1)) && - !pAC->GIni.GP[MAC_1].PState) { - /* XMAC was not initialized but Packet timeout occured */ + /* Check whether MACs are correctly initialized */ + if (((Istatus & (IS_PA_TO_RX1 | IS_PA_TO_TX1)) != 0) && + pAC->GIni.GP[MAC_1].PState == SK_PRT_RESET) { + /* MAC 1 was not initialized but Packet timeout occured */ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E004, SKERR_SIRQ_E004MSG); } - if ((Istatus & (IS_PA_TO_RX2 | IS_PA_TO_TX2)) && - !pAC->GIni.GP[MAC_2].PState) { - /* XMAC was not initialized but Packet timeout occured */ + if (((Istatus & (IS_PA_TO_RX2 | IS_PA_TO_TX2)) != 0) && + pAC->GIni.GP[MAC_2].PState == SK_PRT_RESET) { + /* MAC 2 was not initialized but Packet timeout occured */ SK_ERR_LOG(pAC, 
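The SkGeSirqIsr entry in the hunk above shows the two-level interrupt scheme used throughout this file: the main status word only carries a summary bit, and the detailed hardware-error source register is read and decoded only when that bit is set. A minimal sketch of that pattern, with invented register names and a fake reader:

#include <stdint.h>
#include <stdio.h>

#define IRQ_HW_ERR  0x80000000u       /* invented summary bit  */
#define HW_RAM_PAR  0x00000001u       /* invented detail bits  */
#define HW_MAC1_PAR 0x00000002u

static uint32_t read_hwe_source(void) { return HW_RAM_PAR; }

static void handle_hw_error(uint32_t hw_status)
{
    if (hw_status & HW_RAM_PAR)
        printf("RAM parity error\n");
    if (hw_status & HW_MAC1_PAR)
        printf("MAC 1 parity error\n");
}

/* Top-level ISR: decode the detail register only when the summary bit is set. */
static void sirq_isr(uint32_t istatus)
{
    if ((istatus & IRQ_HW_ERR) != 0)
        handle_hw_error(read_hwe_source());
}

int main(void)
{
    sirq_isr(IRQ_HW_ERR);
    return 0;
}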
SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E005, SKERR_SIRQ_E005MSG); } - if (Istatus & IS_PA_TO_RX1) { + if ((Istatus & IS_PA_TO_RX1) != 0) { /* Means network is filling us up */ SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E002, SKERR_SIRQ_E002MSG); SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX1); } - if (Istatus & IS_PA_TO_RX2) { + if ((Istatus & IS_PA_TO_RX2) != 0) { /* Means network is filling us up */ SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E003, SKERR_SIRQ_E003MSG); SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX2); } - if (Istatus & IS_PA_TO_TX1) { - unsigned int Len; - SK_U64 Octets; - SK_GEPORT *pPrt = &pAC->GIni.GP[0]; + if ((Istatus & IS_PA_TO_TX1) != 0) { + + pPrt = &pAC->GIni.GP[0]; /* May be a normal situation in a server with a slow network */ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX1); /* - * workaround: if in half duplex mode, check for tx hangup. + * workaround: if in half duplex mode, check for Tx hangup. * Read number of TX'ed bytes, wait for 10 ms, then compare - * the number with current value. If nothing changed, we - * assume that tx is hanging and do a FIFO flush (see event - * routine). + * the number with current value. If nothing changed, we assume + * that Tx is hanging and do a FIFO flush (see event routine). */ - if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && !pPrt->HalfDupTimerActive) { - /* + /* * many more pack. arb. timeouts may come in between, * we ignore those */ pPrt->HalfDupTimerActive = SK_TRUE; Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *) &Octets, + SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, &Len, (SK_U32) SK_PNMI_PORT_PHYS2INST(pAC, 0), pAC->Rlmt.Port[0].Net->NetNumber); + pPrt->LastOctets = Octets; + Para.Para32[0] = 0; - SkTimerStart(pAC, IoC, - &pPrt->HalfDupChkTimer, - SK_HALFDUP_CHK_TIME, - SKGE_HWAC, - SK_HWEV_HALFDUP_CHK, - Para); + SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, + SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); } } - if (Istatus & IS_PA_TO_TX2) { - unsigned int Len; - SK_U64 Octets; - SK_GEPORT *pPrt = &pAC->GIni.GP[1]; + if ((Istatus & IS_PA_TO_TX2) != 0) { + + pPrt = &pAC->GIni.GP[1]; /* May be a normal situation in a server with a slow network */ SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX2); - /* - * workaround: see above - */ - if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || - pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && + /* workaround: see above */ + if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) && !pPrt->HalfDupTimerActive) { pPrt->HalfDupTimerActive = SK_TRUE; Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *) &Octets, + SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, &Len, (SK_U32) SK_PNMI_PORT_PHYS2INST(pAC, 1), pAC->Rlmt.Port[1].Net->NetNumber); + pPrt->LastOctets = Octets; + Para.Para32[0] = 1; - SkTimerStart(pAC, IoC, - &pPrt->HalfDupChkTimer, - SK_HALFDUP_CHK_TIME, - SKGE_HWAC, - SK_HWEV_HALFDUP_CHK, - Para); + SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME, + SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para); } } - /* - * Check interrupts of the particular queues. 
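The packet-arbiter Tx timeout handlers above arm the half-duplex hangup workaround: when the port runs half duplex and no check is already pending, the current Tx octet count is saved in LastOctets and a check timer is started. A sketch of that arming step follows; the timer and counter helpers are invented placeholders, and the matching expiry check is sketched further down next to the SK_HWEV_HALFDUP_CHK event.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
    bool     half_duplex;             /* link negotiated to half duplex  */
    bool     chk_timer_active;        /* like HalfDupTimerActive         */
    uint64_t last_octets;             /* like LastOctets                 */
};

static uint64_t read_tx_octets(int port_no)    { (void)port_no; return 123456; }
static void     start_check_timer(int port_no) { printf("check timer armed, port %d\n", port_no); }

/* Packet-arbiter Tx timeout: may be normal on a slow network, but in half
 * duplex it can also mean the transmitter hangs, so snapshot and re-check. */
static void on_pa_tx_timeout(struct port *p, int port_no)
{
    if (p->half_duplex && !p->chk_timer_active) {
        p->chk_timer_active = true;
        p->last_octets = read_tx_octets(port_no);
        start_check_timer(port_no);
    }
}

int main(void)
{
    struct port p = { .half_duplex = true, .chk_timer_active = false, .last_octets = 0 };
    on_pa_tx_timeout(&p, 0);
    return 0;
}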
- */ - if (Istatus & IS_R1_C) { + /* Check interrupts of the particular queues */ + if ((Istatus & IS_R1_C) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_C); SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E006, @@ -846,7 +932,7 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - if (Istatus & IS_R2_C) { + if ((Istatus & IS_R2_C) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_C); SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E007, @@ -857,7 +943,7 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - if (Istatus & IS_XS1_C) { + if ((Istatus & IS_XS1_C) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_XS1_CSR, CSR_IRQ_CL_C); SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E008, @@ -868,7 +954,7 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - if (Istatus & IS_XA1_C) { + if ((Istatus & IS_XA1_C) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_XA1_CSR, CSR_IRQ_CL_C); SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E009, @@ -879,7 +965,7 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - if (Istatus & IS_XS2_C) { + if ((Istatus & IS_XS2_C) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_XS2_CSR, CSR_IRQ_CL_C); SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E010, @@ -890,7 +976,7 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - if (Istatus & IS_XA2_C) { + if ((Istatus & IS_XA2_C) != 0) { /* Clear IRQ */ SK_OUT32(IoC, B0_XA2_CSR, CSR_IRQ_CL_C); SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E011, @@ -901,58 +987,55 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - /* - * External reg interrupt. - */ - if (Istatus & IS_EXT_REG) { - SK_U16 PhyInt; - SK_U16 PhyIMsk; - int i; - SK_GEPORT *pPrt; /* GIni Port struct pointer */ - - /* Test IRQs from PHY. 
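Each of the queue "check" interrupts above follows the same recipe: clear the interrupt in the queue's CSR, log the error, and report the port as failed to the driver and to RLMT. A sketch of that per-queue pattern with invented helpers and register values:

#include <stdint.h>
#include <stdio.h>

/* invented queue descriptor: CSR address plus the owning port */
struct hw_queue {
    uint32_t csr;
    int      port;
};

#define CSR_IRQ_CLEAR 0x0001u

static void write_csr(uint32_t csr, uint32_t val) { (void)csr; (void)val; }
static void report_port_fail(int port)            { printf("port %d failed\n", port); }
static void report_link_down(int port)            { printf("port %d link down\n", port); }

/* Handle a "check" interrupt on one queue: clear it, then escalate. */
static void on_queue_check_irq(const struct hw_queue *q)
{
    write_csr(q->csr, CSR_IRQ_CLEAR);
    report_port_fail(q->port);
    report_link_down(q->port);
}

int main(void)
{
    struct hw_queue q = { 0x60, 0 };
    on_queue_check_irq(&q);
    return 0;
}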
*/ + /* External reg interrupt */ + if ((Istatus & IS_EXT_REG) != 0) { + /* Test IRQs from PHY */ for (i = 0; i < pAC->GIni.GIMacsFound; i++) { + pPrt = &pAC->GIni.GP[i]; + + if (pPrt->PState == SK_PRT_RESET) { + continue; + } + switch (pPrt->PhyType) { + case SK_PHY_XMAC: break; + case SK_PHY_BCOM: - if (pPrt->PState) { - PHY_READ(IoC, pPrt, i, PHY_BCOM_INT_STAT, &PhyInt); - PHY_READ(IoC, pPrt, i, PHY_BCOM_INT_MASK, &PhyIMsk); + SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_STAT, &PhyInt); + SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_MASK, &PhyIMsk); -#ifdef xDEBUG - if (PhyInt & PhyIMsk) { - CMSMPrintString( - pAC->pConfigTable, - MSG_TYPE_RUNTIME_INFO, - "SirqIsr - Stat: %x", - (void *)PhyInt, - (void *)NULL); - } -#endif /* DEBUG */ - - if (PhyInt & ~PhyIMsk) { - SK_DBG_MSG( - pAC, - SK_DBGMOD_HWM, - SK_DBGCAT_IRQ, - ("Port %d Bcom Int: %x Mask: %x\n", - i, PhyInt, PhyIMsk)); - SkPhyIsrBcom(pAC, IoC, i, PhyInt); - } + if ((PhyInt & ~PhyIMsk) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Bcom Int: 0x%04X Mask: 0x%04X\n", + i, PhyInt, PhyIMsk)); + SkPhyIsrBcom(pAC, IoC, i, PhyInt); + } + break; + + case SK_PHY_MARV_COPPER: + case SK_PHY_MARV_FIBER: + SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_STAT, &PhyInt); + SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_MASK, &PhyIMsk); + + if ((PhyInt & PhyIMsk) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Marv Int: 0x%04X Mask: 0x%04X\n", + i, PhyInt, PhyIMsk)); + SkPhyIsrGmac(pAC, IoC, i, PhyInt); } break; + +#ifdef OTHER_PHY case SK_PHY_LONE: - PHY_READ(IoC, pPrt, i, PHY_LONE_INT_STAT, &PhyInt); - PHY_READ(IoC, pPrt, i, PHY_LONE_INT_ENAB, &PhyIMsk); + SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_STAT, &PhyInt); + SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_ENAB, &PhyIMsk); - if (PhyInt & PhyIMsk) { - SK_DBG_MSG( - pAC, - SK_DBGMOD_HWM, - SK_DBGCAT_IRQ, - ("Port %d Lone Int: %x Mask: %x\n", + if ((PhyInt & PhyIMsk) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Port %d Lone Int: %x Mask: %x\n", i, PhyInt, PhyIMsk)); SkPhyIsrLone(pAC, IoC, i, PhyInt); } @@ -960,18 +1043,17 @@ case SK_PHY_NAT: /* todo: National */ break; +#endif /* OTHER_PHY */ } } } - /* - * I2C Ready interrupt - */ - if (Istatus & IS_I2C_READY) { + /* I2C Ready interrupt */ + if ((Istatus & IS_I2C_READY) != 0) { SkI2cIsr(pAC, IoC); } - if (Istatus & IS_LNK_SYNC_M1) { + if ((Istatus & IS_LNK_SYNC_M1) != 0) { /* * We do NOT need the Link Sync interrupt, because it shows * us only a link going down. @@ -981,31 +1063,28 @@ } /* Check MAC after link sync counter */ - if (Istatus & IS_MAC1) { - XM_IN16(IoC, MAC_1, XM_ISRC, &XmIsr); - SkXmIrq(pAC, IoC, MAC_1, XmIsr); + if ((Istatus & IS_MAC1) != 0) { + /* IRQ from MAC 1 */ + SkMacIrq(pAC, IoC, MAC_1); } - if (Istatus & IS_LNK_SYNC_M2) { + if ((Istatus & IS_LNK_SYNC_M2) != 0) { /* * We do NOT need the Link Sync interrupt, because it shows * us only a link going down. 
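The rewritten external-register handler above skips ports that are still in reset and dispatches on the PHY type, passing only the relevant interrupt bits to the per-PHY service routine. The hunk tests Broadcom status against the complement of its mask register and Marvell status against the mask directly; the sketch below mirrors that, but all enum values, register IDs and readers are illustrative, not the real register map.

#include <stdint.h>
#include <stdio.h>

enum phy_type { PHY_XMAC, PHY_BCOM, PHY_MARV };

struct port {
    enum phy_type phy;
    int           in_reset;                     /* like PState == SK_PRT_RESET */
};

#define REG_INT_STAT 0
#define REG_INT_MASK 1

/* invented PHY register reader and service routines */
static uint16_t phy_read(int port, int reg)    { (void)port; (void)reg; return 0x0001; }
static void     bcom_isr(int port, uint16_t s) { printf("BCOM port %d 0x%04x\n", port, s); }
static void     marv_isr(int port, uint16_t s) { printf("Marv port %d 0x%04x\n", port, s); }

static void phy_irq_dispatch(struct port *prt, int nports)
{
    for (int i = 0; i < nports; i++) {
        uint16_t stat, mask;

        if (prt[i].in_reset)
            continue;                           /* port not initialized yet */

        switch (prt[i].phy) {
        case PHY_XMAC:                          /* internal PHY, nothing to read */
            break;
        case PHY_BCOM:
            stat = phy_read(i, REG_INT_STAT);
            mask = phy_read(i, REG_INT_MASK);
            if (stat & ~mask)                   /* set mask bit disables the IRQ */
                bcom_isr(i, stat);
            break;
        case PHY_MARV:
            stat = phy_read(i, REG_INT_STAT);
            mask = phy_read(i, REG_INT_MASK);
            if (stat & mask)                    /* set mask bit enables the IRQ  */
                marv_isr(i, stat);
            break;
        }
    }
}

int main(void)
{
    struct port ports[2] = { { PHY_BCOM, 0 }, { PHY_MARV, 0 } };
    phy_irq_dispatch(ports, 2);
    return 0;
}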
*/ /* clear interrupt */ - SK_OUT8(IoC, MR_ADDR(MAC_2,LNK_SYNC_CTRL), LED_CLR_IRQ); + SK_OUT8(IoC, MR_ADDR(MAC_2, LNK_SYNC_CTRL), LED_CLR_IRQ); } /* Check MAC after link sync counter */ - if (Istatus & IS_MAC2) { - XM_IN16(IoC, MAC_2, XM_ISRC, &XmIsr); - SkXmIrq(pAC, IoC, MAC_2, XmIsr); + if ((Istatus & IS_MAC2) != 0) { + /* IRQ from MAC 2 */ + SkMacIrq(pAC, IoC, MAC_2); } - /* - * Timer interrupt - * To be served last - */ - if (Istatus & IS_TIMINT) { + /* Timer interrupt (served last) */ + if ((Istatus & IS_TIMINT) != 0) { SkHwtIsr(pAC, IoC); } } /* SkGeSirqIsr */ @@ -1013,24 +1092,23 @@ /****************************************************************************** * - * SkGePortCheckShorts - Implementing of the Workaround Errata # 2 + * SkGePortCheckShorts - Implementing XMAC Workaround Errata # 2 * * return: * 0 o.k. nothing needed * 1 Restart needed on this port */ -int SkGePortCheckShorts( +static int SkGePortCheckShorts( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ { - SK_U64 Shorts; /* Short Event Counter */ - SK_U64 CheckShorts; /* Check value for Short Event Counter */ - SK_U64 RxCts; /* RX Counter (packets on network) */ - SK_U64 RxTmp; /* RX temp. Counter */ - SK_U64 FcsErrCts; /* FCS Error Counter */ + SK_U32 Shorts; /* Short Event Counter */ + SK_U32 CheckShorts; /* Check value for Short Event Counter */ + SK_U64 RxCts; /* Rx Counter (packets on network) */ + SK_U32 RxTmp; /* Rx temp. Counter */ + SK_U32 FcsErrCts; /* FCS Error Counter */ SK_GEPORT *pPrt; /* GIni Port struct pointer */ - unsigned Len; int Rtv; /* Return value */ int i; @@ -1039,48 +1117,35 @@ /* Default: no action */ Rtv = SK_HW_PS_NONE; - /* - * Extra precaution: check for short Event counter - */ - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_RX_SHORTS, (char *)&Shorts, - &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), - pAC->Rlmt.Port[Port].Net->NetNumber); + (void)SkXmUpdateStats(pAC, IoC, Port); + + /* Extra precaution: check for short Event counter */ + (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts); /* - * Read RX counter (packets seen on the network and not neccesarily + * Read Rx counter (packets seen on the network and not necessarily * really received. */ - Len = sizeof(SK_U64); RxCts = 0; - for (i = 0; i < sizeof(SkGeRxOids)/sizeof(SK_U32); i++) { - SkPnmiGetVar(pAC, IoC, SkGeRxOids[i], (char *)&RxTmp, - &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), - pAC->Rlmt.Port[Port].Net->NetNumber); - RxCts += RxTmp; + for (i = 0; i < sizeof(SkGeRxRegs)/sizeof(SkGeRxRegs[0]); i++) { + (void)SkXmMacStatistic(pAC, IoC, Port, SkGeRxRegs[i], &RxTmp); + RxCts += (SK_U64)RxTmp; } /* On default: check shorts against zero */ CheckShorts = 0; - /* - * Extra extra precaution on active links: - */ + /* Extra precaution on active links */ if (pPrt->PHWLinkUp) { - /* - * Reset Link Restart counter - */ + /* Reset Link Restart counter */ pPrt->PLinkResCt = 0; pPrt->PAutoNegTOCt = 0; /* If link is up check for 2 */ CheckShorts = 2; - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_RX_FCS, - (char *)&FcsErrCts, &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), - pAC->Rlmt.Port[Port].Net->NetNumber); + (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXF_FCS_ERR, &FcsErrCts); if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && pPrt->PLipaAutoNeg == SK_LIPA_UNKNOWN && @@ -1091,13 +1156,11 @@ * manual full/half duplex mode. 
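SkGePortCheckShorts above now snapshots the XMAC statistics and sums several 32-bit Rx counters into one 64-bit packet total before comparing it with the previous snapshot. A small sketch of that accumulation loop; the register table and reader are invented, the widening to 64 bits mirrors the hunk.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* invented Rx counter registers, in the spirit of SkGeRxRegs[] */
static const int rx_regs[] = { 0x100, 0x104, 0x108, 0x10c };

static uint32_t read_counter(int port, int reg) { (void)port; return (uint32_t)reg; }

/* Sum the per-register 32-bit Rx counters into one 64-bit packet count. */
static uint64_t total_rx(int port)
{
    uint64_t rx = 0;

    for (size_t i = 0; i < sizeof(rx_regs) / sizeof(rx_regs[0]); i++)
        rx += (uint64_t)read_counter(port, rx_regs[i]);

    return rx;
}

int main(void)
{
    printf("rx packets seen: %llu\n", (unsigned long long)total_rx(0));
    return 0;
}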
*/ if (RxCts == pPrt->PPrevRx) { - /* - * Nothing received - * restart link - */ + /* Nothing received, restart link */ pPrt->PPrevFcs = FcsErrCts; pPrt->PPrevShorts = Shorts; - return (SK_HW_PS_RESTART); + + return(SK_HW_PS_RESTART); } else { pPrt->PLipaAutoNeg = SK_LIPA_MANUAL; @@ -1118,7 +1181,7 @@ pPrt->PPrevFcs = FcsErrCts; pPrt->PPrevShorts = Shorts; - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } pPrt->PPrevFcs = FcsErrCts; @@ -1134,8 +1197,8 @@ pPrt->PPrevShorts = Shorts; pPrt->PPrevRx = RxCts; - return (Rtv); -} /* SkGePortCheckShorts*/ + return(Rtv); +} /* SkGePortCheckShorts */ /****************************************************************************** @@ -1147,22 +1210,27 @@ * 1 Restart needed on this port * 2 Link came up */ -int SkGePortCheckUp( +static int SkGePortCheckUp( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ { switch (pAC->GIni.GP[Port].PhyType) { case SK_PHY_XMAC: - return (SkGePortCheckUpXmac(pAC, IoC, Port)); + return(SkGePortCheckUpXmac(pAC, IoC, Port)); case SK_PHY_BCOM: - return (SkGePortCheckUpBcom(pAC, IoC, Port)); + return(SkGePortCheckUpBcom(pAC, IoC, Port)); + case SK_PHY_MARV_COPPER: + case SK_PHY_MARV_FIBER: + return(SkGePortCheckUpGmac(pAC, IoC, Port)); +#ifdef OTHER_PHY case SK_PHY_LONE: - return (SkGePortCheckUpLone(pAC, IoC, Port)); + return(SkGePortCheckUpLone(pAC, IoC, Port)); case SK_PHY_NAT: - return (SkGePortCheckUpNat(pAC, IoC, Port)); + return(SkGePortCheckUpNat(pAC, IoC, Port)); +#endif /* OTHER_PHY */ } - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } /* SkGePortCheckUp */ @@ -1175,14 +1243,13 @@ * 1 Restart needed on this port * 2 Link came up */ -static int SkGePortCheckUpXmac( +static int SkGePortCheckUpXmac( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ { - SK_U64 Shorts; /* Short Event Counter */ + SK_U32 Shorts; /* Short Event Counter */ SK_GEPORT *pPrt; /* GIni Port struct pointer */ - unsigned Len; int Done; SK_U32 GpReg; /* General Purpose register value */ SK_U16 Isrc; /* Interrupt source register */ @@ -1190,26 +1257,25 @@ SK_U16 LpAb; /* Link Partner Ability */ SK_U16 ResAb; /* Resolved Ability */ SK_U16 ExtStat; /* Extended Status Register */ - SK_BOOL AutoNeg; /* Is Autonegotiation used ? */ + SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ SK_U8 NextMode; /* Next AutoSensing Mode */ pPrt = &pAC->GIni.GP[Port]; if (pPrt->PHWLinkUp) { if (pPrt->PhyType != SK_PHY_XMAC) { - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } else { - return (SkGePortCheckShorts(pAC, IoC, Port)); + return(SkGePortCheckShorts(pAC, IoC, Port)); } } IsrcSum = pPrt->PIsave; pPrt->PIsave = 0; - /* Now wait for each port's link. 
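SkGePortCheckUp above is reduced to a dispatcher over the PHY type, and every checker returns one of three codes documented in the function header: nothing needed, restart needed, or link came up. A sketch of that convention with invented per-PHY checkers:

#include <stdio.h>

enum ps { PS_NONE = 0, PS_RESTART = 1, PS_LINK = 2 };
enum phy_type { PHY_XMAC, PHY_BCOM, PHY_MARV };

static enum ps check_up_xmac(int port) { (void)port; return PS_NONE; }
static enum ps check_up_bcom(int port) { (void)port; return PS_LINK; }
static enum ps check_up_gmac(int port) { (void)port; return PS_NONE; }

/* Dispatch the link check to the PHY-specific routine. */
static enum ps check_up(enum phy_type phy, int port)
{
    switch (phy) {
    case PHY_XMAC: return check_up_xmac(port);
    case PHY_BCOM: return check_up_bcom(port);
    case PHY_MARV: return check_up_gmac(port);
    }
    return PS_NONE;
}

int main(void)
{
    printf("port 0 state: %d\n", check_up(PHY_BCOM, 0));
    return 0;
}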
*/ - if (pPrt->PLinkMode == SK_LMODE_HALF || - pPrt->PLinkMode == SK_LMODE_FULL) { + /* Now wait for each port's link */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { AutoNeg = SK_FALSE; } else { @@ -1218,27 +1284,26 @@ if (pPrt->PLinkBroken) { /* Link was broken */ - XM_IN32(IoC,Port,XM_GP_PORT, &GpReg); + XM_IN32(IoC, Port, XM_GP_PORT, &GpReg); if ((GpReg & XM_GP_INP_ASS) == 0) { /* The Link is in sync */ - XM_IN16(IoC,Port,XM_ISRC, &Isrc); + XM_IN16(IoC, Port, XM_ISRC, &Isrc); IsrcSum |= Isrc; SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum); if ((Isrc & XM_IS_INP_ASS) == 0) { - /* It has been in sync since last Time */ + /* It has been in sync since last time */ /* Restart the PORT */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link in sync Restart Port %d\n", Port)); - /* We now need to reinitialize the PrevShorts counter. */ - Len = sizeof(SK_U64); - SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_RX_SHORTS, (char *)&Shorts, - &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), - pAC->Rlmt.Port[Port].Net->NetNumber); + (void)SkXmUpdateStats(pAC, IoC, Port); + + /* We now need to reinitialize the PrevShorts counter */ + (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts); pPrt->PPrevShorts = Shorts; - pAC->GIni.GP[Port].PLinkBroken = SK_FALSE; + pPrt->PLinkBroken = SK_FALSE; /* * Link Restart Workaround: @@ -1248,31 +1313,34 @@ * happening we check for a maximum number * of consecutive restart. If those happens, * we do NOT restart the active link and - * check whether the lionk is now o.k. + * check whether the link is now o.k. */ - pAC->GIni.GP[Port].PLinkResCt ++; + pPrt->PLinkResCt++; + pPrt->PAutoNegTimeOut = 0; - if (pAC->GIni.GP[Port].PLinkResCt < SK_MAX_LRESTART) { - return (SK_HW_PS_RESTART); + if (pPrt->PLinkResCt < SK_MAX_LRESTART) { + return(SK_HW_PS_RESTART); } - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + pPrt->PLinkResCt = 0; + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Do NOT restart on Port %d %x %x\n", Port, Isrc, IsrcSum)); - pAC->GIni.GP[Port].PLinkResCt = 0; } else { - pPrt->PIsave = (SK_U16)(IsrcSum & (XM_IS_AND)); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + pPrt->PIsave = IsrcSum & XM_IS_AND; + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Save Sync/nosync Port %d %x %x\n", Port, Isrc, IsrcSum)); /* Do nothing more if link is broken */ - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } } else { /* Do nothing more if link is broken */ - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } } @@ -1280,69 +1348,67 @@ /* Link was not broken, check if it is */ XM_IN16(IoC, Port, XM_ISRC, &Isrc); IsrcSum |= Isrc; - if ((Isrc & XM_IS_INP_ASS) == XM_IS_INP_ASS) { + if ((Isrc & XM_IS_INP_ASS) != 0) { XM_IN16(IoC, Port, XM_ISRC, &Isrc); IsrcSum |= Isrc; - if ((Isrc & XM_IS_INP_ASS) == XM_IS_INP_ASS) { + if ((Isrc & XM_IS_INP_ASS) != 0) { XM_IN16(IoC, Port, XM_ISRC, &Isrc); IsrcSum |= Isrc; - if ((Isrc & XM_IS_INP_ASS) == XM_IS_INP_ASS) { + if ((Isrc & XM_IS_INP_ASS) != 0) { pPrt->PLinkBroken = SK_TRUE; - /* - * Re-Init Link partner Autoneg flag - */ + /* Re-Init Link partner Autoneg flag */ pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN; - SK_DBG_MSG(pAC,SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link broken Port %d\n", Port)); - /* Cable removed-> reinit sense mode. */ - /* Init default sense mode. 
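The link-restart workaround above guards against two link partners endlessly restarting each other: each restart bumps PLinkResCt, and once a maximum number of consecutive restarts is reached the port is left alone and merely re-checked. A minimal sketch of that limiter; the cap value and names are illustrative, not the driver's SK_MAX_LRESTART.

#include <stdio.h>

#define MAX_LINK_RESTART 4            /* illustrative cap */

struct port {
    int restart_count;                /* consecutive restarts on this port */
};

/* Decide whether a port whose link came back in sync should be restarted.
 * Returns 1 for "restart", 0 for "leave it alone and just re-check". */
static int should_restart(struct port *p)
{
    p->restart_count++;
    if (p->restart_count < MAX_LINK_RESTART)
        return 1;

    p->restart_count = 0;             /* give up restarting for now */
    return 0;
}

int main(void)
{
    struct port p = { 0 };
    for (int i = 0; i < 5; i++)
        printf("attempt %d -> %s\n", i, should_restart(&p) ? "restart" : "skip");
    return 0;
}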
*/ + /* Cable removed-> reinit sense mode */ SkHWInitDefSense(pAC, IoC, Port); - return (SK_HW_PS_RESTART); + return(SK_HW_PS_RESTART); } } } else { SkXmAutoNegLipaXmac(pAC, IoC, Port, Isrc); if (SkGePortCheckShorts(pAC, IoC, Port) == SK_HW_PS_RESTART) { - return (SK_HW_PS_RESTART); + return(SK_HW_PS_RESTART); } } } /* * here we usually can check whether the link is in sync and - * autonegotiation is done. + * auto-negotiation is done. */ XM_IN32(IoC, Port, XM_GP_PORT, &GpReg); XM_IN16(IoC, Port, XM_ISRC, &Isrc); IsrcSum |= Isrc; SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum); + if ((GpReg & XM_GP_INP_ASS) != 0 || (IsrcSum & XM_IS_INP_ASS) != 0) { if ((GpReg & XM_GP_INP_ASS) == 0) { - /* Save Autonegotiation Done interrupt only if link is in sync. */ - pPrt->PIsave = (SK_U16)(IsrcSum & (XM_IS_AND)); + /* Save Auto-negotiation Done interrupt only if link is in sync */ + pPrt->PIsave = IsrcSum & XM_IS_AND; } -#ifdef DEBUG - if (pPrt->PIsave & (XM_IS_AND)) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, +#ifdef DEBUG + if ((pPrt->PIsave & XM_IS_AND) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg done rescheduled Port %d\n", Port)); } -#endif - return (SK_HW_PS_NONE); +#endif /* DEBUG */ + return(SK_HW_PS_NONE); } if (AutoNeg) { - if (IsrcSum & XM_IS_AND) { + if ((IsrcSum & XM_IS_AND) != 0) { SkHWLinkUp(pAC, IoC, Port); - Done = SkXmAutoNegDone(pAC,IoC,Port); + Done = SkMacAutoNegDone(pAC, IoC, Port); if (Done != SK_AND_OK) { - /* Get PHY parameters, for debuging only */ - PHY_READ(IoC, pPrt, Port, PHY_XMAC_AUNE_LP, &LpAb); - PHY_READ(IoC, pPrt, Port, PHY_XMAC_RES_ABI, &ResAb); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + /* Get PHY parameters, for debugging only */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LpAb); + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg FAIL Port %d (LpAb %x, ResAb %x)\n", Port, LpAb, ResAb)); @@ -1354,44 +1420,30 @@ SkHWSenseSetNext(pAC, IoC, Port, NextMode); } - return (SK_HW_PS_RESTART); - } - else { - /* - * Dummy Read extended status to prevent extra link down/ups - * (clear Page Received bit if set) - */ - PHY_READ(IoC, pPrt, Port, PHY_XMAC_AUNE_EXP, &ExtStat); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNeg done Port %d\n", Port)); - return (SK_HW_PS_LINK); + return(SK_HW_PS_RESTART); } - } - - /* - * AutoNeg not done, but HW link is up. Check for timeouts - */ - pPrt->PAutoNegTimeOut ++; - if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) { /* - * Increase the Timeout counter. + * Dummy Read extended status to prevent extra link down/ups + * (clear Page Received bit if set) */ - pPrt->PAutoNegTOCt ++; + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_EXP, &ExtStat); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg done Port %d\n", Port)); + return(SK_HW_PS_LINK); + } + + /* AutoNeg not done, but HW link is up. Check for timeouts */ + pPrt->PAutoNegTimeOut++; + if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) { + /* Increase the Timeout counter */ + pPrt->PAutoNegTOCt++; - /* - * Timeout occured. - * What do we need now? - */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM, - SK_DBGCAT_IRQ, - ("AutoNeg timeout Port %d\n", - Port)); + /* Timeout occured */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("AutoNeg timeout Port %d\n", Port)); if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && pPrt->PLipaAutoNeg != SK_LIPA_AUTO) { - /* - * Timeout occured - * Set Link manually up. 
- */ + /* Set Link manually up */ SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Set manual full duplex Port %d\n", Port)); @@ -1403,10 +1455,10 @@ /* * This is rather complicated. * we need to check here whether the LIPA_AUTO - * we saw before is false alert. We saw at one + * we saw before is false alert. We saw at one * switch ( SR8800) that on boot time it sends - * just one autoneg packet and does no further - * autonegotiation. + * just one auto-neg packet and does no further + * auto-negotiation. * Solution: we restart the autosensing after * a few timeouts. */ @@ -1415,30 +1467,31 @@ SkHWInitDefSense(pAC, IoC, Port); } - /* - * Do the restart - */ - return (SK_HW_PS_RESTART); + /* Do the restart */ + return(SK_HW_PS_RESTART); } } else { - /* - * Link is up and we don't need more. - */ -#ifdef DEBUG + /* Link is up and we don't need more */ +#ifdef DEBUG if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("ERROR: Lipa auto detected on port %d\n", Port)); } -#endif - - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_IRQ, +#endif /* DEBUG */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link sync(GP), Port %d\n", Port)); SkHWLinkUp(pAC, IoC, Port); - return (SK_HW_PS_LINK); + + /* + * Link sync (GP) and so assume a good connection. But if not received + * a bunch of frames received in a time slot (maybe broken tx cable) + * the port is restart. + */ + return(SK_HW_PS_LINK); } - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } /* SkGePortCheckUpXmac */ @@ -1451,7 +1504,7 @@ * 1 Restart needed on this port * 2 Link came up */ -static int SkGePortCheckUpBcom( +static int SkGePortCheckUpBcom( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ @@ -1465,20 +1518,22 @@ #ifdef DEBUG SK_U16 LpAb; SK_U16 ExtStat; -#endif /* DEBUG */ - SK_BOOL AutoNeg; /* Is Autonegotiation used ? */ +#endif /* DEBUG */ + SK_BOOL AutoNeg; /* Is Auto-negotiation used ? 
*/ pPrt = &pAC->GIni.GP[Port]; /* Check for No HCD Link events (#10523) */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_INT_STAT, &Isrc); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &Isrc); #ifdef xDEBUG - if ((Isrc & ~0x1800) == 0x70) { + if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT) == + (PHY_B_IS_SCR_S_ER | PHY_B_IS_RRS_CHANGE | PHY_B_IS_LRS_CHANGE)) { + SK_U32 Stat1, Stat2, Stat3; Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_INT_MASK, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1); CMSMPrintString( pAC->pConfigTable, MSG_TYPE_RUNTIME_INFO, @@ -1487,14 +1542,14 @@ (void *)Stat1); Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_CTRL, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1); Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_STAT, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &Stat2); Stat1 = Stat1 << 16 | Stat2; Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUNE_ADV, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2); Stat3 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUNE_LP, &Stat3); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3); Stat2 = Stat2 << 16 | Stat3; CMSMPrintString( pAC->pConfigTable, @@ -1504,14 +1559,14 @@ (void *)Stat2); Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUNE_EXP, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1); Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_EXT_STAT, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2); Stat1 = Stat1 << 16 | Stat2; Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_1000T_CTRL, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2); Stat3 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_1000T_STAT, &Stat3); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &Stat3); Stat2 = Stat2 << 16 | Stat3; CMSMPrintString( pAC->pConfigTable, @@ -1521,14 +1576,14 @@ (void *)Stat2); Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_P_EXT_CTRL, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1); Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_P_EXT_STAT, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2); Stat1 = Stat1 << 16 | Stat2; Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUX_CTRL, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2); Stat3 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUX_STAT, &Stat3); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3); Stat2 = Stat2 << 16 | Stat3; CMSMPrintString( pAC->pConfigTable, @@ -1537,16 +1592,18 @@ (void *)Stat1, (void *)Stat2); } -#endif /* DEBUG */ +#endif /* DEBUG */ if ((Isrc & (PHY_B_IS_NO_HDCL /* | PHY_B_IS_NO_HDC */)) != 0) { /* * Workaround BCOM Errata: * enable and disable loopback mode if "NO HCD" occurs. */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_CTRL, &Ctrl); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_CTRL, Ctrl | PHY_CT_LOOP); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_CTRL, Ctrl & ~PHY_CT_LOOP); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Ctrl); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, + (SK_U16)(Ctrl | PHY_CT_LOOP)); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, + (SK_U16)(Ctrl & ~PHY_CT_LOOP)); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("No HCD Link event, Port %d\n", Port)); #ifdef xDEBUG @@ -1556,14 +1613,14 @@ "No HCD link event, port %d.", (void *)Port, (void *)NULL); -#endif /* DEBUG */ +#endif /* DEBUG */ } /* Not obsolete: link status bit is latched to 0 and autoclearing! 
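The Broadcom errata handling above pulses loopback mode in the PHY control register whenever a "No HCD" (no highest common denominator) interrupt is reported. A sketch of that read-modify toggle with invented register helpers and bit values:

#include <stdint.h>
#include <stdio.h>

#define PHY_CTRL      0x00            /* stand-in control register index */
#define CTRL_LOOPBACK 0x4000          /* stand-in loopback bit           */

static uint16_t regs[1];

static uint16_t phy_read(int port, int reg)              { (void)port; return regs[reg]; }
static void     phy_write(int port, int reg, uint16_t v) { (void)port; regs[reg] = v; }

/* Workaround: briefly enable then disable loopback to kick the PHY out of
 * the "no highest common denominator" state. */
static void no_hcd_workaround(int port)
{
    uint16_t ctrl = phy_read(port, PHY_CTRL);

    phy_write(port, PHY_CTRL, (uint16_t)(ctrl | CTRL_LOOPBACK));
    phy_write(port, PHY_CTRL, (uint16_t)(ctrl & ~CTRL_LOOPBACK));
}

int main(void)
{
    no_hcd_workaround(0);
    printf("ctrl after workaround: 0x%04x\n", phy_read(0, PHY_CTRL));
    return 0;
}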
*/ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_STAT, &PhyStat); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat); if (pPrt->PHWLinkUp) { - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } #ifdef xDEBUG @@ -1571,7 +1628,7 @@ SK_U32 Stat1, Stat2, Stat3; Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_INT_MASK, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1); CMSMPrintString( pAC->pConfigTable, MSG_TYPE_RUNTIME_INFO, @@ -1580,14 +1637,14 @@ (void *)Stat1); Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_CTRL, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1); Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_STAT, &PhyStat); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat); Stat1 = Stat1 << 16 | PhyStat; Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUNE_ADV, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2); Stat3 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUNE_LP, &Stat3); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3); Stat2 = Stat2 << 16 | Stat3; CMSMPrintString( pAC->pConfigTable, @@ -1597,14 +1654,14 @@ (void *)Stat2); Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUNE_EXP, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1); Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_EXT_STAT, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2); Stat1 = Stat1 << 16 | Stat2; Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_1000T_CTRL, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2); Stat3 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_1000T_STAT, &ResAb); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); Stat2 = Stat2 << 16 | ResAb; CMSMPrintString( pAC->pConfigTable, @@ -1614,14 +1671,14 @@ (void *)Stat2); Stat1 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_P_EXT_CTRL, &Stat1); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1); Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_P_EXT_STAT, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2); Stat1 = Stat1 << 16 | Stat2; Stat2 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUX_CTRL, &Stat2); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2); Stat3 = 0; - PHY_READ(pAC, pPrt, Port, PHY_BCOM_AUX_STAT, &Stat3); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3); Stat2 = Stat2 << 16 | Stat3; CMSMPrintString( pAC->pConfigTable, @@ -1630,9 +1687,9 @@ (void *)Stat1, (void *)Stat2); } -#endif /* DEBUG */ +#endif /* DEBUG */ - /* Now wait for each port's link. */ + /* Now wait for each port's link */ if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { AutoNeg = SK_FALSE; } @@ -1642,76 +1699,59 @@ /* * Here we usually can check whether the link is in sync and - * autonegotiation is done. + * auto-negotiation is done. 
*/ -#if 0 -/* RA;:;: obsolete */ - XM_IN16(IoC, Port, XM_ISRC, &Isrc); -#endif /* 0 */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_STAT, &PhyStat); - -#ifdef xDEBUG - if ((PhyStat & PHY_ST_LSYNC) >> 2 != (ExtStat & PHY_B_PES_LS) >> 8) { - CMSMPrintString( - pAC->pConfigTable, - MSG_TYPE_RUNTIME_INFO, - "PhyStat != ExtStat: %x %x", - (void *)PhyStat, - (void *)ExtStat); - } -#endif /* DEBUG */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat); - SkXmAutoNegLipaBcom(pAC, IoC, Port, PhyStat); + SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg:%d, PhyStat: %Xh.\n", AutoNeg, PhyStat)); + ("AutoNeg: %d, PhyStat: 0x%04x\n", AutoNeg, PhyStat)); - PHY_READ(IoC, pPrt, Port, PHY_BCOM_1000T_STAT, &ResAb); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); - if (ResAb & PHY_B_1000S_MSF) { + if ((ResAb & PHY_B_1000S_MSF) != 0) { /* Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("Master/Slave Fault port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; - return (SK_HW_PS_RESTART); + + return(SK_HW_PS_RESTART); } if ((PhyStat & PHY_ST_LSYNC) == 0) { - return (SK_HW_PS_NONE); - } - else if (ResAb & PHY_B_1000S_MSR) { - pPrt->PMSStatus = SK_MS_STAT_MASTER; - } - else { - pPrt->PMSStatus = SK_MS_STAT_SLAVE; + return(SK_HW_PS_NONE); } + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("AutoNeg:%d, PhyStat: %Xh.\n", AutoNeg, PhyStat)); + ("AutoNeg: %d, PhyStat: 0x%04x\n", AutoNeg, PhyStat)); if (AutoNeg) { - if (PhyStat & PHY_ST_AN_OVER) { + if ((PhyStat & PHY_ST_AN_OVER) != 0) { SkHWLinkUp(pAC, IoC, Port); - Done = SkXmAutoNegDone(pAC, IoC, Port); + Done = SkMacAutoNegDone(pAC, IoC, Port); if (Done != SK_AND_OK) { #ifdef DEBUG - /* Get PHY parameters, for debugging only. */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUNE_LP, &LpAb); - PHY_READ(IoC, pPrt, Port, PHY_BCOM_1000T_STAT, &ExtStat); + /* Get PHY parameters, for debugging only */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LpAb); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ExtStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n", Port, LpAb, ExtStat)); -#endif /* DEBUG */ - return (SK_HW_PS_RESTART); +#endif /* DEBUG */ + return(SK_HW_PS_RESTART); } else { #ifdef xDEBUG - /* Dummy read ISR to prevent extra link downs/ups. */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_INT_STAT, &ExtStat); + /* Dummy read ISR to prevent extra link downs/ups */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat); - if ((ExtStat & ~0x1800) != 0) { + if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) { CMSMPrintString( pAC->pConfigTable, MSG_TYPE_RUNTIME_INFO, @@ -1719,30 +1759,28 @@ (void *)ExtStat, (void *)NULL); } -#endif /* DEBUG */ +#endif /* DEBUG */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg done Port %d\n", Port)); - return (SK_HW_PS_LINK); + return(SK_HW_PS_LINK); } - } + } } else { /* !AutoNeg */ - /* - * Link is up and we don't need more. - */ -#ifdef DEBUG + /* Link is up and we don't need more. */ +#ifdef DEBUG if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("ERROR: Lipa auto detected on port %d\n", Port)); } -#endif +#endif /* DEBUG */ #ifdef xDEBUG - /* Dummy read ISR to prevent extra link downs/ups. 
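After reading the 1000BASE-T status word above, a master/slave fault forces an auto-negotiation restart, and otherwise a single resolution bit selects master or slave, which the patch collapses into one conditional expression. A sketch of that decision; the bit values are placeholders, not the real PHY_B_1000S_* definitions.

#include <stdint.h>
#include <stdio.h>

#define MS_FAULT    0x8000            /* placeholder for the fault bit      */
#define MS_RESOLVED 0x4000            /* placeholder for the resolution bit */

enum ms_status { MS_STAT_FAULT, MS_STAT_MASTER, MS_STAT_SLAVE };

/* Map the 1000BASE-T status word to a master/slave state; set *restart
 * when negotiation has to be redone because of a fault. */
static enum ms_status resolve_ms(uint16_t res_ab, int *restart)
{
    if (res_ab & MS_FAULT) {
        *restart = 1;
        return MS_STAT_FAULT;
    }
    *restart = 0;
    return (res_ab & MS_RESOLVED) ? MS_STAT_MASTER : MS_STAT_SLAVE;
}

int main(void)
{
    int restart;
    printf("status: %d restart: %d\n", resolve_ms(MS_RESOLVED, &restart), restart);
    return 0;
}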
*/ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_INT_STAT, &ExtStat); + /* Dummy read ISR to prevent extra link downs/ups */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat); - if ((ExtStat & ~0x1800) != 0) { + if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) { CMSMPrintString( pAC->pConfigTable, MSG_TYPE_RUNTIME_INFO, @@ -1750,20 +1788,136 @@ (void *)ExtStat, (void *)NULL); } -#endif /* DEBUG */ +#endif /* DEBUG */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link sync(GP), Port %d\n", Port)); SkHWLinkUp(pAC, IoC, Port); - return (SK_HW_PS_LINK); + return(SK_HW_PS_LINK); } - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } /* SkGePortCheckUpBcom */ /****************************************************************************** * + * SkGePortCheckUpGmac - Check, if the link is up + * + * return: + * 0 o.k. nothing needed + * 1 Restart needed on this port + * 2 Link came up + */ +static int SkGePortCheckUpGmac( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO Context */ +int Port) /* Which port should be checked */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + int Done; + SK_U16 Isrc; /* Interrupt source */ + SK_U16 PhyStat; /* Phy Status */ + SK_U16 PhySpecStat;/* Phy Specific Status */ + SK_U16 ResAb; /* Master/Slave resolution */ + SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ + + pPrt = &pAC->GIni.GP[Port]; + + /* Read PHY Interrupt Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &Isrc); + + if ((Isrc & PHY_M_IS_AN_COMPL) != 0) { + /* TBD */ + } + + if (pPrt->PHWLinkUp) { + return(SK_HW_PS_NONE); + } + + /* Now wait for each port's link */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + AutoNeg = SK_FALSE; + } + else { + AutoNeg = SK_TRUE; + } + + /* Read PHY Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg: %d, PhyStat: 0x%04x\n", AutoNeg, PhyStat)); + + SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); + + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb); + + if ((ResAb & PHY_B_1000S_MSF) != 0) { + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + + return(SK_HW_PS_RESTART); + } + + /* Read PHY Specific Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg: %d, PhySpecStat: 0x%04x\n", AutoNeg, PhySpecStat)); + + if ((PhySpecStat & PHY_M_PS_LINK_UP) == 0) { + return(SK_HW_PS_NONE); + } + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; + + pPrt->PCableLen = (SK_U8)((PhySpecStat & PHY_M_PS_CABLE_MSK) >> 7); + + if (AutoNeg) { + /* Auto-Negotiation Over ? 
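The new GMAC check above also derives a cable length code from the Marvell PHY specific status word with a mask-and-shift. A sketch of that field extraction; the shift by 7 mirrors the hunk, but the mask value and field width used below are assumptions made for illustration only.

#include <stdint.h>
#include <stdio.h>

#define PS_LINK_UP   0x0400u          /* placeholder for the link-up bit        */
#define PS_CABLE_MSK 0x0380u          /* assumed 3-bit length field at bit 7    */

/* Extract the cable length code from the PHY specific status word. */
static uint8_t cable_len(uint16_t phy_spec_stat)
{
    return (uint8_t)((phy_spec_stat & PS_CABLE_MSK) >> 7);
}

int main(void)
{
    uint16_t stat = PS_LINK_UP | (3u << 7);   /* link up, length code 3 */
    if (stat & PS_LINK_UP)
        printf("cable length code: %u\n", cable_len(stat));
    return 0;
}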
*/ + if ((PhyStat & PHY_ST_AN_OVER) != 0) { + + SkHWLinkUp(pAC, IoC, Port); + + Done = SkMacAutoNegDone(pAC, IoC, Port); + + if (Done != SK_AND_OK) { + return(SK_HW_PS_RESTART); + } + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNeg done Port %d\n", Port)); + return(SK_HW_PS_LINK); + } + } + else { /* !AutoNeg */ + /* Link is up and we don't need more */ +#ifdef DEBUG + if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("ERROR: Lipa auto detected on port %d\n", Port)); + } +#endif /* DEBUG */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Link sync, Port %d\n", Port)); + SkHWLinkUp(pAC, IoC, Port); + + return(SK_HW_PS_LINK); + } + + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpGmac */ + + +#ifdef OTHER_PHY +/****************************************************************************** + * * SkGePortCheckUpLone - Check if the link is up * * return: @@ -1771,33 +1925,32 @@ * 1 Restart needed on this port * 2 Link came up */ -static int SkGePortCheckUpLone( +static int SkGePortCheckUpLone( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ { SK_GEPORT *pPrt; /* GIni Port struct pointer */ - int Done; + int Done; SK_U16 Isrc; /* Interrupt source register */ SK_U16 LpAb; /* Link Partner Ability */ SK_U16 ExtStat; /* Extended Status Register */ SK_U16 PhyStat; /* Phy Status Register */ SK_U16 StatSum; - SK_BOOL AutoNeg; /* Is Autonegotiation used ? */ + SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */ SK_U8 NextMode; /* Next AutoSensing Mode */ pPrt = &pAC->GIni.GP[Port]; if (pPrt->PHWLinkUp) { - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } StatSum = pPrt->PIsave; pPrt->PIsave = 0; /* Now wait for each ports link */ - if (pPrt->PLinkMode == SK_LMODE_HALF || - pPrt->PLinkMode == SK_LMODE_FULL) { + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { AutoNeg = SK_FALSE; } else { @@ -1806,39 +1959,33 @@ /* * here we usually can check whether the link is in sync and - * autonegotiation is done. + * auto-negotiation is done. 
*/ - XM_IN16(IoC, Port, XM_ISRC, &Isrc); - PHY_READ(IoC, pPrt, Port, PHY_LONE_STAT, &PhyStat); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_STAT, &PhyStat); StatSum |= PhyStat; - SkXmAutoNegLipaLone(pAC, IoC, Port, PhyStat); - if ((PhyStat & PHY_ST_LSYNC) == 0){ - /* - * Save Autonegotiation Done bit - */ + SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat); + + if ((PhyStat & PHY_ST_LSYNC) == 0) { + /* Save Auto-negotiation Done bit */ pPrt->PIsave = (SK_U16)(StatSum & PHY_ST_AN_OVER); #ifdef DEBUG - if (pPrt->PIsave & PHY_ST_AN_OVER) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + if ((pPrt->PIsave & PHY_ST_AN_OVER) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg done rescheduled Port %d\n", Port)); } -#endif - return (SK_HW_PS_NONE); +#endif /* DEBUG */ + return(SK_HW_PS_NONE); } if (AutoNeg) { - if (StatSum & PHY_ST_AN_OVER) { + if ((StatSum & PHY_ST_AN_OVER) != 0) { SkHWLinkUp(pAC, IoC, Port); - Done = SkXmAutoNegDone(pAC,IoC,Port); + Done = SkMacAutoNegDone(pAC, IoC, Port); if (Done != SK_AND_OK) { - /* Get PHY parameters, for debuging only */ - PHY_READ(IoC, pPrt, Port, - PHY_LONE_AUNE_LP, - &LpAb); - PHY_READ(IoC, pPrt, Port, - PHY_LONE_1000T_STAT, - &ExtStat); + /* Get PHY parameters, for debugging only */ + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LpAb); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ExtStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n", Port, LpAb, ExtStat)); @@ -1851,7 +1998,7 @@ SkHWSenseSetNext(pAC, IoC, Port, NextMode); } - return (SK_HW_PS_RESTART); + return(SK_HW_PS_RESTART); } else { @@ -1859,71 +2006,54 @@ * Dummy Read interrupt status to prevent * extra link down/ups */ - PHY_READ(IoC, pPrt, Port, PHY_LONE_INT_STAT, &ExtStat); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNeg done Port %d\n", Port)); - return (SK_HW_PS_LINK); + return(SK_HW_PS_LINK); } - } + } - /* - * AutoNeg not done, but HW link is up. Check for timeouts - */ - pPrt->PAutoNegTimeOut ++; + /* AutoNeg not done, but HW link is up. Check for timeouts */ + pPrt->PAutoNegTimeOut++; if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) { - /* - * Timeout occured. - * What do we need now? - */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM, - SK_DBGCAT_IRQ, - ("AutoNeg timeout Port %d\n", - Port)); + /* Timeout occured */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("AutoNeg timeout Port %d\n", Port)); if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE && pPrt->PLipaAutoNeg != SK_LIPA_AUTO) { - /* - * Timeout occured - * Set Link manually up. - */ - SkHWSenseSetNext(pAC, IoC, Port, - SK_LMODE_FULL); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM, - SK_DBGCAT_IRQ, - ("Set manual full duplex Port %d\n", - Port)); + /* Set Link manually up */ + SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("Set manual full duplex Port %d\n", Port)); } - /* - * Do the restart - */ - return (SK_HW_PS_RESTART); + /* Do the restart */ + return(SK_HW_PS_RESTART); } } else { - /* - * Link is up and we don't need more. 
- */ -#ifdef DEBUG + /* Link is up and we don't need more */ +#ifdef DEBUG if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("ERROR: Lipa auto detected on port %d\n", Port)); } -#endif +#endif /* DEBUG */ /* * Dummy Read interrupt status to prevent * extra link down/ups */ - PHY_READ(IoC, pPrt, Port, PHY_LONE_INT_STAT, &ExtStat); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, ("Link sync(GP), Port %d\n", Port)); SkHWLinkUp(pAC, IoC, Port); - return (SK_HW_PS_LINK); + return(SK_HW_PS_LINK); } - return (SK_HW_PS_NONE); -} /* SkGePortCheckUpLone*/ + return(SK_HW_PS_NONE); +} /* SkGePortCheckUpLone */ /****************************************************************************** @@ -1935,14 +2065,15 @@ * 1 Restart needed on this port * 2 Link came up */ -static int SkGePortCheckUpNat( +static int SkGePortCheckUpNat( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC, /* IO Context */ int Port) /* Which port should be checked */ { /* todo: National */ - return (SK_HW_PS_NONE); + return(SK_HW_PS_NONE); } /* SkGePortCheckUpNat */ +#endif /* OTHER_PHY */ /****************************************************************************** @@ -1968,7 +2099,7 @@ SK_U8 Val8; Port = Para.Para32[0]; - pPrt = & pAC->GIni.GP[Port]; + pPrt = &pAC->GIni.GP[Port]; switch (Event) { case SK_HWEV_WATIM: @@ -2010,9 +2141,9 @@ } /* Todo: still needed for non-XMAC PHYs??? */ - /* Start workaround Errata #2 timer. */ - SkTimerStart(pAC, IoC, &pAC->GIni.GP[Port].PWaTimer, - Time, SKGE_HWAC, SK_HWEV_WATIM, Para); + /* Start workaround Errata #2 timer */ + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, Time, + SKGE_HWAC, SK_HWEV_WATIM, Para); break; case SK_HWEV_PORT_START: @@ -2035,7 +2166,7 @@ break; case SK_HWEV_PORT_STOP: - if (pAC->GIni.GP[Port].PHWLinkUp) { + if (pPrt->PHWLinkUp) { /* * Signal directly to RLMT to ensure correct * sequence of SWITCH and RESET event. @@ -2087,6 +2218,10 @@ break; case SK_HWEV_SET_ROLE: + /* not possible for fiber */ + if (!pAC->GIni.GICopperType) { + break; + } Val8 = (SK_U8)Para.Para32[1]; if (pPrt->PMSMode != Val8) { /* Set New link mode */ @@ -2098,30 +2233,48 @@ } break; + case SK_HWEV_SET_SPEED: + if (pPrt->PhyType != SK_PHY_MARV_COPPER) { + break; + } + Val8 = (SK_U8)Para.Para32[1]; + if (pPrt->PLinkSpeed != Val8) { + /* Set New Speed parameter */ + pPrt->PLinkSpeed = Val8; + + /* Restart Port */ + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para); + SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para); + } + break; + case SK_HWEV_HALFDUP_CHK: /* - * half duplex hangup workaround. See packet arbiter timeout - * interrupt for description + * half duplex hangup workaround. + * See packet arbiter timeout interrupt for description */ pPrt->HalfDupTimerActive = SK_FALSE; - if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || + if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF || pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) { + Len = sizeof(SK_U64); SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets, &Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port), pAC->Rlmt.Port[Port].Net->NetNumber); + if (pPrt->LastOctets == Octets) { - /* TX hanging, do a FIFO flush restarts it. 
*/ - SkXmFlushTxFifo(pAC, IoC, Port); + /* Tx hanging, a FIFO flush restarts it */ + SkMacFlushTxFifo(pAC, IoC, Port); } } break; + default: SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_SIRQ_E001, SKERR_SIRQ_E001MSG); break; } - return (0); + return(0); } /* SkGeSirqEvent */ @@ -2144,25 +2297,13 @@ pPrt = &pAC->GIni.GP[Port]; - if (IStatus & PHY_B_IS_PSE) { - /* Incorrectable pair swap error. */ + if ((IStatus & PHY_B_IS_PSE) != 0) { + /* Incorrectable pair swap error */ SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E022, SKERR_SIRQ_E022MSG); } - if (IStatus & PHY_B_IS_MDXI_SC) { - /* not used */ - } - - if (IStatus & PHY_B_IS_HCT) { - /* not used */ - } - - if (IStatus & PHY_B_IS_LCT) { - /* not used */ - } - - if (IStatus & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) { + if ((IStatus & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) != 0) { Para.Para32[0] = (SK_U32)Port; SkHWLinkDown(pAC, IoC, Port); @@ -2171,47 +2312,58 @@ SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); /* Start workaround Errata #2 timer */ - SkTimerStart(pAC, IoC, &pPrt->PWaTimer, - SK_WA_INA_TIME, SKGE_HWAC, SK_HWEV_WATIM, Para); + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME, + SKGE_HWAC, SK_HWEV_WATIM, Para); } - if (IStatus & PHY_B_IS_NO_HDCL) { - } +} /* SkPhyIsrBcom */ - if (IStatus & PHY_B_IS_NO_HDC) { - /* not used */ - } - if (IStatus & PHY_B_IS_NEG_USHDC) { - /* not used */ - } +/****************************************************************************** + * + * SkPhyIsrGmac - PHY interrupt service routine + * + * Description: handle all interrupts from Marvell PHY + * + * Returns: N/A + */ +static void SkPhyIsrGmac( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* Io Context */ +int Port, /* Port Num = PHY Num */ +SK_U16 IStatus) /* Interrupt Status */ +{ + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + SK_EVPARA Para; - if (IStatus & PHY_B_IS_SCR_S_ER) { - /* not used */ - } + pPrt = &pAC->GIni.GP[Port]; - if (IStatus & PHY_B_IS_RRS_CHANGE) { - /* not used */ - } + if ((IStatus & (PHY_M_IS_AN_PR | PHY_M_IS_LST_CHANGE)) != 0) { + Para.Para32[0] = (SK_U32)Port; - if (IStatus & PHY_B_IS_LRS_CHANGE) { - /* not used */ - } + SkHWLinkDown(pAC, IoC, Port); - if (IStatus & PHY_B_IS_DUP_CHANGE) { - /* not used */ + /* Signal to RLMT */ + SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); } - - if (IStatus & PHY_B_IS_LSP_CHANGE) { - /* not used */ + + if ((IStatus & PHY_M_IS_AN_ERROR) != 0) { + /* Auto-Negotiation Error */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E023, SKERR_SIRQ_E023MSG); } - - if (IStatus & PHY_B_IS_CRC_ER) { - /* not used */ + + if ((IStatus & PHY_M_IS_LSP_CHANGE) != 0) { + /* TBD */ } -} /* SkPhyIsrBcom */ + + if ((IStatus & PHY_M_IS_FIFO_ERROR) != 0) { + /* FIFO Overflow/Underrun Error */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E024, SKERR_SIRQ_E024MSG); + } +} /* SkPhyIsrGmac */ +#ifdef OTHER_PHY /****************************************************************************** * * SkPhyIsrLone - PHY interrupt service routine @@ -2228,49 +2380,15 @@ { SK_EVPARA Para; - if (IStatus & PHY_L_IS_CROSS) { - /* not used */ - } - - if (IStatus & PHY_L_IS_POL) { - /* not used */ - } - - if (IStatus & PHY_L_IS_SS) { - /* not used */ - } - - if (IStatus & PHY_L_IS_CFULL) { - /* not used */ - } - - if (IStatus & PHY_L_IS_AN_C) { - /* not used */ - } - - if (IStatus & PHY_L_IS_SPEED) { - /* not used */ - } - - if (IStatus & PHY_L_IS_CFULL) { - /* not used */ - } - if (IStatus & (PHY_L_IS_DUP | PHY_L_IS_ISOL)) { SkHWLinkDown(pAC, IoC, Port); /* Signal to RLMT */ Para.Para32[0] = 
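The SK_HWEV_HALFDUP_CHK handler above completes the half-duplex workaround armed in the Tx timeout path: when the timer fires, the Tx octet counter is read again and, if it has not moved since the snapshot in LastOctets, the Tx FIFO is flushed to restart transmission. A sketch of that expiry check, using the same invented helpers as the arming sketch earlier:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
    bool     half_duplex;
    bool     chk_timer_active;
    uint64_t last_octets;             /* snapshot taken when the timeout fired */
};

static uint64_t read_tx_octets(int port_no) { (void)port_no; return 123456; }
static void     flush_tx_fifo(int port_no)  { printf("flush Tx FIFO, port %d\n", port_no); }

/* Timer expiry: if the counter did not move, the transmitter is stuck. */
static void on_halfdup_check(struct port *p, int port_no)
{
    p->chk_timer_active = false;

    if (p->half_duplex && read_tx_octets(port_no) == p->last_octets)
        flush_tx_fifo(port_no);
}

int main(void)
{
    struct port p = { .half_duplex = true, .chk_timer_active = true, .last_octets = 123456 };
    on_halfdup_check(&p, 0);
    return 0;
}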
(SK_U32)Port; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); - - /* Start workaround Errata #2 timer */ - SkTimerStart(pAC, IoC, &pAC->GIni.GP[Port].PWaTimer, - SK_WA_INA_TIME, SKGE_HWAC, SK_HWEV_WATIM, Para); } - if (IStatus & PHY_L_IS_MDINT) { - /* not used */ - } } /* SkPhyIsrLone */ +#endif /* OTHER_PHY */ /* End of File */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/ski2c.c linux.21pre4-ac2/drivers/net/sk98lin/ski2c.c --- linux.21pre4/drivers/net/sk98lin/ski2c.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/ski2c.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: ski2c.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.47 $ - * Date: $Date: 2001/04/05 11:38:09 $ + * Version: $Revision: 1.56 $ + * Date: $Date: 2002/12/19 14:20:41 $ * Purpose: Functions to access Voltage and Temperature Sensor - * (taken from Monalisa (taken from Concentrator)) * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,43 @@ * History: * * $Log: ski2c.c,v $ + * Revision 1.56 2002/12/19 14:20:41 rschmidt + * Added debugging code in SkI2cWait(). + * Replaced all I2C-write operations with function SkI2cWrite(). + * Fixed compiler warning because of uninitialized 'Time' in SkI2cEvent(). + * Editorial changes. + * + * Revision 1.55 2002/10/15 07:23:55 rschmidt + * Added setting of the GIYukon32Bit bool variable to distinguish + * 32-bit adapters. + * Editorial changes (TWSI). + * + * Revision 1.54 2002/08/13 09:05:06 rschmidt + * Added new thresholds if VAUX is not available (GIVauxAvail). + * Merged defines for PHY PLL 3V3 voltage (A and B). + * Editorial changes. + * + * Revision 1.53 2002/08/08 11:04:53 rwahl + * Added missing comment for revision 1.51 + * + * Revision 1.52 2002/08/08 10:09:02 jschmalz + * Sensor init state caused wrong error log entry + * + * Revision 1.51 2002/08/06 09:43:03 jschmalz + * Extensions and changes for Yukon + * + * Revision 1.50 2002/08/02 12:09:22 rschmidt + * Added support for YUKON sensors. + * Editorial changes. + * + * Revision 1.49 2002/07/30 11:07:52 rschmidt + * Replaced MaxSens init by update for Copper in SkI2cInit1(), + * because it was already initialized in SkI2cInit0(). + * Editorial changes. + * + * Revision 1.48 2001/08/16 12:44:33 afischer + * LM80 sensor init values corrected + * * Revision 1.47 2001/04/05 11:38:09 rassmann * Set SenState to idle in SkI2cWaitIrq(). * Changed error message in SkI2cWaitIrq(). @@ -178,7 +214,7 @@ * Revision 1.2 1998/08/11 07:27:15 gklug * add: functions of the interface * adapt rest of source to C coding Conventions - * rmv: unneccessary code taken from Mona Lisa + * rmv: unnecessary code taken from Mona Lisa * * Revision 1.1 1998/06/19 14:28:43 malthoff * Created. Sources taken from ML Projekt. 
@@ -192,7 +228,7 @@ * I2C Protocol */ static const char SysKonnectFileId[] = - "$Id: ski2c.c,v 1.47 2001/04/05 11:38:09 rassmann Exp $"; + "$Id: ski2c.c,v 1.56 2002/12/19 14:20:41 rschmidt Exp $"; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/lm80.h" @@ -280,21 +316,21 @@ #ifndef I2C_SLOW_TIMING #define T_CLK_LOW 1300L /* clock low time in ns */ #define T_CLK_HIGH 600L /* clock high time in ns */ -#define T_DATA_IN_SETUP 100L /* data in Set-UP Time */ +#define T_DATA_IN_SETUP 100L /* data in Set-up Time */ #define T_START_HOLD 600L /* start condition hold time */ #define T_START_SETUP 600L /* start condition Set-up time */ #define T_STOP_SETUP 600L /* stop condition Set-up time */ -#define T_BUS_IDLE 1300L /* time the bus must free after tx */ +#define T_BUS_IDLE 1300L /* time the bus must free after Tx */ #define T_CLK_2_DATA_OUT 900L /* max. clock low to data output valid */ #else /* I2C_SLOW_TIMING */ /* I2C Standard Mode Timing */ #define T_CLK_LOW 4700L /* clock low time in ns */ #define T_CLK_HIGH 4000L /* clock high time in ns */ -#define T_DATA_IN_SETUP 250L /* data in Set-UP Time */ +#define T_DATA_IN_SETUP 250L /* data in Set-up Time */ #define T_START_HOLD 4000L /* start condition hold time */ -#define T_START_SETUP 4700L /* start condition Set_up time */ +#define T_START_SETUP 4700L /* start condition Set-up time */ #define T_STOP_SETUP 4000L /* stop condition Set-up time */ -#define T_BUS_IDLE 4700L /* time the bus must free after tx */ +#define T_BUS_IDLE 4700L /* time the bus must free after Tx */ #endif /* !I2C_SLOW_TIMING */ #define NS2BCLK(x) (((x)*125)/10000) @@ -312,9 +348,9 @@ #define I2C_DATA_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA) #define I2C_DATA_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA) #define I2C_DATA_OUT(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA_DIR) -#define I2C_DATA_IN(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR|I2C_DATA) +#define I2C_DATA_IN(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA) #define I2C_CLK_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_CLK) -#define I2C_CLK_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK|I2C_DATA_DIR) +#define I2C_CLK_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK | I2C_DATA_DIR) #define I2C_START_COND(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK) #define NS2CLKT(x) ((x*125L)/10000) @@ -331,7 +367,8 @@ I2C_DATA_OUT(IoC); if (Bit) { I2C_DATA_HIGH(IoC); - } else { + } + else { I2C_DATA_LOW(IoC); } SkDgWaitTime(IoC, NS2BCLK(T_DATA_IN_SETUP)); @@ -428,11 +465,8 @@ SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH)); SK_I2C_GET_SW(IoC, &I2cSwCtrl); - if (I2cSwCtrl & I2C_DATA) { - Bit = 1; - } else { - Bit = 0; - } + + Bit = (I2cSwCtrl & I2C_DATA) ? 1 : 0; I2C_CLK_LOW(IoC); SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW-T_CLK_2_DATA_OUT)); @@ -444,16 +478,16 @@ /* * Receive an ACK. * - * returns 0 If acknoledged + * returns 0 If acknowledged * 1 in case of an error */ int SkI2cRcvAck( -SK_IOC IoC) /* I/O Context */ +SK_IOC IoC) /* I/O Context */ { /* * Received bit must be zero. */ - return (SkI2cRcvBit(IoC) != 0); + return(SkI2cRcvBit(IoC) != 0); } /* SkI2cRcvAck */ @@ -461,7 +495,7 @@ * Send an NACK. */ void SkI2cSndNAck( -SK_IOC IoC) /* I/O Context */ +SK_IOC IoC) /* I/O Context */ { /* * Received bit must be zero. @@ -487,18 +521,19 @@ /* * Send one byte to the I2C device and wait for ACK. * - * Return acknoleged status. + * Return acknowleged status. 
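The software TWSI/I2C layer above bit-bangs the bus with explicit setup and clock times (T_CLK_HIGH, T_CLK_LOW and friends) converted to bus clocks via NS2BCLK. Below is a simplified sketch of the receive-bit sequence under such timing rules; the GPIO-style helpers are placeholders, and the low-phase wait is simplified to the full T_CLK_LOW rather than the driver's corrected value.

#include <stdio.h>

/* placeholder fast-mode timings in ns, in the spirit of T_CLK_HIGH/T_CLK_LOW */
#define T_CLK_HIGH_NS  600L
#define T_CLK_LOW_NS  1300L

static void scl_high(void)   { }
static void scl_low(void)    { }
static int  sda_read(void)   { return 1; }    /* sample the data line */
static void wait_ns(long ns) { (void)ns; }    /* busy-wait stand-in   */

/* Receive one bit: raise the clock, keep it high for T_CLK_HIGH, sample
 * the data line, then pull the clock low again for T_CLK_LOW. */
static int i2c_rcv_bit(void)
{
    int bit;

    scl_high();
    wait_ns(T_CLK_HIGH_NS);
    bit = sda_read() ? 1 : 0;
    scl_low();
    wait_ns(T_CLK_LOW_NS);
    return bit;
}

int main(void)
{
    printf("bit: %d\n", i2c_rcv_bit());
    return 0;
}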
*/ int SkI2cSndByte( SK_IOC IoC, /* I/O Context */ -int Byte) /* byte to send */ +int Byte) /* byte to send */ { int i; for (i = 0; i < 8; i++) { if (Byte & (1<<(7-i))) { SkI2cSndBit(IoC, 1); - } else { + } + else { SkI2cSndBit(IoC, 0); } } @@ -514,7 +549,7 @@ */ int SkI2cRcvByte( SK_IOC IoC, /* I/O Context */ -int Last) /* Last Byte Flag */ +int Last) /* Last Byte Flag */ { int i; int Byte = 0; @@ -526,7 +561,8 @@ if (Last) { SkI2cSndNAck(IoC); - } else { + } + else { SkI2cSndAck(IoC); } @@ -537,7 +573,7 @@ /* * Start dialog and send device address * - * Return 0 if acknoleged, 1 in case of an error + * Return 0 if acknowleged, 1 in case of an error */ int SkI2cSndDev( SK_IOC IoC, /* I/O Context */ @@ -545,7 +581,7 @@ int Rw) /* Read / Write Flag */ { SkI2cStart(IoC); - Rw = ~Rw; + Rw = ~Rw; Rw &= I2C_WRITE; return(SkI2cSndByte(IoC, (Addr<<1) | Rw)); } /* SkI2cSndDev */ @@ -567,18 +603,33 @@ int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */ { SK_U64 StartTime; + SK_U64 CurrentTime; SK_U32 I2cCtrl; StartTime = SkOsGetTime(pAC); + do { - if (SkOsGetTime(pAC) - StartTime > SK_TICKS_PER_SEC / 8) { + CurrentTime = SkOsGetTime(pAC); + + if (CurrentTime - StartTime > SK_TICKS_PER_SEC / 8) { + SK_I2C_STOP(IoC); #ifndef SK_DIAG SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E002, SKERR_I2C_E002MSG); #endif /* !SK_DIAG */ return(1); } + SK_I2C_GET_CTL(IoC, &I2cCtrl); + +#ifdef xYUKON_DBG + printf("StartTime=%lu, CurrentTime=%lu\n", + StartTime, CurrentTime); + if (kbhit()) { + return(1); + } +#endif /* YUKON_DBG */ + } while ((I2cCtrl & I2C_FLAG) == (SK_U32)Event << 31); return(0); @@ -591,7 +642,7 @@ * Returns * Nothing */ -void SkI2cWaitIrq( +void SkI2cWaitIrq( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC) /* I/O Context */ { @@ -621,8 +672,6 @@ return; } /* SkI2cWaitIrq */ -#ifdef SK_DIAG - /* * writes a single byte or 4 bytes into the I2C device * @@ -635,14 +684,17 @@ SK_U32 I2cData, /* I2C Data to write */ int I2cDev, /* I2C Device Address */ int I2cReg, /* I2C Device Register Address */ -int I2cBurst) /* I2C Burst Flag ( 0 || I2C_BURST ) */ +int I2cBurst) /* I2C Burst Flag */ { SK_OUT32(IoC, B2_I2C_DATA, I2cData); SK_I2C_CTL(IoC, I2C_WRITE, I2cDev, I2cReg, I2cBurst); + return(SkI2cWait(pAC, IoC, I2C_WRITE)); } /* SkI2cWrite*/ +#ifdef SK_DIAG + /* * reads a single byte or 4 bytes from the I2C device * @@ -653,15 +705,17 @@ SK_IOC IoC, /* I/O Context */ int I2cDev, /* I2C Device Address */ int I2cReg, /* I2C Device Register Address */ -int I2cBurst) /* I2C Burst Flag ( 0 || I2C_BURST ) */ +int I2cBurst) /* I2C Burst Flag */ { SK_U32 Data; SK_OUT32(IoC, B2_I2C_DATA, 0); SK_I2C_CTL(IoC, I2C_READ, I2cDev, I2cReg, I2cBurst); - if (SkI2cWait(pAC, IoC, I2C_READ)) { - w_print("I2C Transfer Timeout!\n"); + + if (SkI2cWait(pAC, IoC, I2C_READ) != 0) { + w_print("%s\n", SKERR_I2C_E002MSG); } + SK_IN32(IoC, B2_I2C_DATA, &Data); return(Data); } /* SkI2cRead */ @@ -684,13 +738,17 @@ SK_IOC IoC, /* I/O Context */ SK_SENSOR *pSen) /* Sensor to be read */ { - return((*pSen->SenRead)(pAC, IoC, pSen)); + if (pSen->SenRead != NULL) { + return((*pSen->SenRead)(pAC, IoC, pSen)); + } + else + return(0); /* no success */ } /* SkI2cReadSensor*/ /* * Do the Init state 0 initialization */ -static int SkI2cInit0( +static int SkI2cInit0( SK_AC *pAC) /* Adapter Context */ { int i; @@ -698,109 +756,33 @@ /* Begin with first sensor */ pAC->I2c.CurrSens = 0; - /* Set to mimimum sensor number */ - pAC->I2c.MaxSens = SK_MIN_SENSORS; + /* Begin with timeout control for state machine */ + pAC->I2c.TimerMode = 
SK_TIMER_WATCH_STATEMACHINE; + + /* Set sensor number to zero */ + pAC->I2c.MaxSens = 0; #ifndef SK_DIAG /* Initialize Number of Dummy Reads */ pAC->I2c.DummyReads = SK_MAX_SENSORS; #endif - for (i = 0; i < SK_MAX_SENSORS; i ++) { - switch (i) { - case 0: - pAC->I2c.SenTable[i].SenDesc = "Temperature"; - pAC->I2c.SenTable[i].SenType = SK_SEN_TEMP; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH0; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW0; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH0; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW0; - pAC->I2c.SenTable[i].SenReg = LM80_TEMP_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - case 1: - pAC->I2c.SenTable[i].SenDesc = "Voltage PCI"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH1; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW1; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH1; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW1; - pAC->I2c.SenTable[i].SenReg = LM80_VT0_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - case 2: - pAC->I2c.SenTable[i].SenDesc = "Voltage PCI-IO"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH2; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW2; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH2; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW2; - pAC->I2c.SenTable[i].SenReg = LM80_VT1_IN; - pAC->I2c.SenTable[i].SenInit = SK_FALSE; - break; - case 3: - pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH3; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW3; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH3; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW3; - pAC->I2c.SenTable[i].SenReg = LM80_VT2_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - case 4: - pAC->I2c.SenTable[i].SenDesc = "Voltage PMA"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH4; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW4; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH4; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW4; - pAC->I2c.SenTable[i].SenReg = LM80_VT3_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - case 5: - pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH5; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW5; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH5; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW5; - pAC->I2c.SenTable[i].SenReg = LM80_VT4_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - case 6: - pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL"; - pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH6; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW6; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH6; - pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW6; - pAC->I2c.SenTable[i].SenReg = LM80_VT5_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - case 7: - pAC->I2c.SenTable[i].SenDesc = "Speed Fan"; - pAC->I2c.SenTable[i].SenType = SK_SEN_FAN; - pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_ERRHIGH; - pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_ERRLOW; - pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_WARNHIGH; - 
pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_WARNLOW; - pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN; - pAC->I2c.SenTable[i].SenInit = SK_TRUE; - break; - default: - SK_ERR_LOG(pAC, SK_ERRCL_INIT | SK_ERRCL_SW, - SKERR_I2C_E001, SKERR_I2C_E001MSG); - break; - } - + for (i = 0; i < SK_MAX_SENSORS; i++) { + pAC->I2c.SenTable[i].SenDesc = "unknown"; + pAC->I2c.SenTable[i].SenType = SK_SEN_UNKNOWN; + pAC->I2c.SenTable[i].SenThreErrHigh = 0; + pAC->I2c.SenTable[i].SenThreErrLow = 0; + pAC->I2c.SenTable[i].SenThreWarnHigh = 0; + pAC->I2c.SenTable[i].SenThreWarnLow = 0; + pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN; + pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_NONE; pAC->I2c.SenTable[i].SenValue = 0; - pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK; + pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_NOT_PRESENT; pAC->I2c.SenTable[i].SenErrCts = 0; pAC->I2c.SenTable[i].SenBegErrTS = 0; pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE; - pAC->I2c.SenTable[i].SenRead = SkLm80ReadSensor; - pAC->I2c.SenTable[i].SenDev = LM80_ADDR; + pAC->I2c.SenTable[i].SenRead = NULL; + pAC->I2c.SenTable[i].SenDev = 0; } /* Now we are "INIT data"ed */ @@ -829,68 +811,211 @@ * - all 0s * */ -static int SkI2cInit1( +static int SkI2cInit1( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC) /* I/O Context */ { + int i; + SK_U8 I2cSwCtrl; + SK_GEPORT *pPrt; /* GIni Port struct pointer */ + if (pAC->I2c.InitLevel != SK_INIT_DATA) { /* ReInit not needed in I2C module */ return(0); } - SK_OUT32(IoC, B2_I2C_DATA, 0); - SK_I2C_CTL(IoC, I2C_WRITE, LM80_ADDR, LM80_CFG, 0); - (void)SkI2cWait(pAC, IoC, I2C_WRITE); - - SK_OUT32(IoC, B2_I2C_DATA, 0xff); - SK_I2C_CTL(IoC, I2C_WRITE, LM80_ADDR, LM80_IMSK_1, 0); - (void)SkI2cWait(pAC, IoC, I2C_WRITE); - - SK_OUT32(IoC, B2_I2C_DATA, 0xff); - SK_I2C_CTL(IoC, I2C_WRITE, LM80_ADDR, LM80_IMSK_2, 0); - (void)SkI2cWait(pAC, IoC, I2C_WRITE); - - SK_OUT32(IoC, B2_I2C_DATA, 0x0); - SK_I2C_CTL(IoC, I2C_WRITE, LM80_ADDR, LM80_FAN_CTRL, 0); - (void)SkI2cWait(pAC, IoC, I2C_WRITE); - - SK_OUT32(IoC, B2_I2C_DATA, 0); - SK_I2C_CTL(IoC, I2C_WRITE, LM80_ADDR, LM80_TEMP_CTRL, 0); - (void)SkI2cWait(pAC, IoC, I2C_WRITE); + /* Set the Direction of I2C-Data Pin to IN */ + SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA); + /* Check for 32-Bit Yukon with Low at I2C-Data Pin */ + SK_I2C_GET_SW(IoC, &I2cSwCtrl); - SK_OUT32(IoC, B2_I2C_DATA, LM80_CFG_START); - SK_I2C_CTL(IoC, I2C_WRITE, LM80_ADDR, LM80_CFG, 0); - (void)SkI2cWait(pAC, IoC, I2C_WRITE); + if ((I2cSwCtrl & I2C_DATA) == 0) { + /* this is a 32-Bit board */ + pAC->GIni.GIYukon32Bit = SK_TRUE; + return(0); + } + + /* Check for 64 Bit Yukon without sensors */ + if (SkI2cWrite(pAC, IoC, 0, LM80_ADDR, LM80_CFG, 0) != 0) { + return(0); + } + (void)SkI2cWrite(pAC, IoC, 0xff, LM80_ADDR, LM80_IMSK_1, 0); + + (void)SkI2cWrite(pAC, IoC, 0xff, LM80_ADDR, LM80_IMSK_2, 0); + + (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, LM80_FAN_CTRL, 0); + + (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, LM80_TEMP_CTRL, 0); + + (void)SkI2cWrite(pAC, IoC, LM80_CFG_START, LM80_ADDR, LM80_CFG, 0); + /* - * MaxSens has to be initialized here, because PhyType is not - * set when performing Init Level 1 + * MaxSens has to be updated here, because PhyType is not + * set when performing Init Level 0 */ - switch (pAC->GIni.GP[0].PhyType) { - case SK_PHY_XMAC: - pAC->I2c.MaxSens = 5; - break; + pAC->I2c.MaxSens = 5; + + pPrt = &pAC->GIni.GP[0]; + + switch (pPrt->PhyType) { case SK_PHY_BCOM: - pAC->I2c.SenTable[4].SenDesc = "Voltage PHY A PLL"; if (pAC->GIni.GIMacsFound == 1) { - pAC->I2c.MaxSens = 6; - } + 
pAC->I2c.MaxSens += 1; + } else { - pAC->I2c.MaxSens = 8; + pAC->I2c.MaxSens += 3; } break; - case SK_PHY_LONE: - pAC->I2c.MaxSens = 5; + case SK_PHY_MARV_COPPER: + pAC->I2c.MaxSens += 3; break; } + for (i = 0; i < pAC->I2c.MaxSens; i++) { + switch (i) { + case 0: + pAC->I2c.SenTable[i].SenDesc = "Temperature"; + pAC->I2c.SenTable[i].SenType = SK_SEN_TEMP; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_TEMP_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_TEMP_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_TEMP_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_TEMP_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_TEMP_IN; + break; + case 1: + pAC->I2c.SenTable[i].SenDesc = "Voltage PCI"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_5V_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_5V_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_5V_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_5V_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT0_IN; + break; + case 2: + pAC->I2c.SenTable[i].SenDesc = "Voltage PCI-IO"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_IO_5V_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_IO_5V_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_IO_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_IO_3V3_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT1_IN; + pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_PCI_IO; + break; + case 3: + pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VDD_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VDD_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VDD_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VDD_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT2_IN; + break; + case 4: + if (pPrt->PhyType == SK_PHY_BCOM) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY A PLL"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + } + else if (pPrt->PhyType == SK_PHY_MARV_COPPER) { + pAC->I2c.SenTable[i].SenDesc = "Voltage VAUX"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VAUX_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VAUX_3V3_HIGH_WARN; + if (pAC->GIni.GIVauxAvail) { + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR; + } + else { + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_0V_WARN_ERR; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_0V_WARN_ERR; + } + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PMA"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + } + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenReg = LM80_VT3_IN; + break; + case 5: + if (pPrt->PhyType == SK_PHY_MARV_COPPER) { + pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC-Co 1V5"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_CORE_1V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_CORE_1V5_HIGH_WARN; + 
pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_CORE_1V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_CORE_1V5_LOW_ERR; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; + } + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenReg = LM80_VT4_IN; + break; + case 6: + if (pPrt->PhyType == SK_PHY_MARV_COPPER) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 3V3"; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL"; + } + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT5_IN; + break; + case 7: + if (pPrt->PhyType == SK_PHY_MARV_COPPER) { + pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5"; + pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_VT6_IN; + } + else { + pAC->I2c.SenTable[i].SenDesc = "Speed Fan"; + pAC->I2c.SenTable[i].SenType = SK_SEN_FAN; + pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_FAN_HIGH_ERR; + pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_FAN_HIGH_WARN; + pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_FAN_LOW_WARN; + pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_FAN_LOW_ERR; + pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN; + } + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_INIT | SK_ERRCL_SW, + SKERR_I2C_E001, SKERR_I2C_E001MSG); + break; + } + + pAC->I2c.SenTable[i].SenValue = 0; + pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK; + pAC->I2c.SenTable[i].SenErrCts = 0; + pAC->I2c.SenTable[i].SenBegErrTS = 0; + pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE; + pAC->I2c.SenTable[i].SenRead = SkLm80ReadSensor; + pAC->I2c.SenTable[i].SenDev = LM80_ADDR; + } + #ifndef SK_DIAG pAC->I2c.DummyReads = pAC->I2c.MaxSens; - - /* Clear the interrupt source */ - SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); #endif /* !SK_DIAG */ + /* Clear I2C IRQ */ + SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); + /* Now we are I/O initialized */ pAC->I2c.InitLevel = SK_INIT_IO; return(0); @@ -900,7 +1025,7 @@ /* * Init level 2: Start first sensor read. */ -static int SkI2cInit2( +static int SkI2cInit2( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC) /* I/O Context */ { @@ -977,13 +1102,13 @@ * * Starts the timer if necessary. */ -void SkI2cIsr( +void SkI2cIsr( SK_AC *pAC, /* Adapter Context */ SK_IOC IoC) /* I/O Context */ { SK_EVPARA Para; - /* Clear the interrupt source */ + /* Clear I2C IRQ */ SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ); Para.Para64 = 0; @@ -994,7 +1119,7 @@ /* * Check this sensors Value against the threshold and send events. 
*/ -static void SkI2cCheckSensor( +static void SkI2cCheckSensor( SK_AC *pAC, /* Adapter Context */ SK_SENSOR *pSen) { @@ -1008,7 +1133,7 @@ /* Check Dummy Reads first */ if (pAC->I2c.DummyReads > 0) { - pAC->I2c.DummyReads --; + pAC->I2c.DummyReads--; return; } @@ -1016,7 +1141,7 @@ CurrTime = SkOsGetTime(pAC); /* Set para to the most useful setting: The current sensor. */ - ParaLocal.Para64 = (SK_U64) pAC->I2c.CurrSens; + ParaLocal.Para64 = (SK_U64)pAC->I2c.CurrSens; /* Check the Value against the thresholds. First: Error Thresholds */ TooHigh = (pSen->SenValue > pSen->SenThreErrHigh); @@ -1053,9 +1178,9 @@ */ DoErrLog = SK_FALSE; } - } else { - /* We came from a different state */ - /* -> Set Begin Time Stamp */ + } + else { + /* We came from a different state -> Set Begin Time Stamp */ pSen->SenBegErrTS = CurrTime; pSen->SenErrFlag = SK_SEN_ERR_ERR; } @@ -1063,7 +1188,7 @@ if (DoTrapSend) { /* Set current Time */ pSen->SenLastErrTrapTS = CurrTime; - pSen->SenErrCts ++; + pSen->SenErrCts++; /* Queue PNMI Event */ SkEventQueue(pAC, SKGE_PNMI, (TooHigh ? @@ -1095,7 +1220,6 @@ TooHigh = (pSen->SenValue > pSen->SenThreWarnHigh); TooLow = (pSen->SenValue < pSen->SenThreWarnLow); - if (!IsError && (TooHigh || TooLow)) { /* Error condition is satisfied */ DoTrapSend = SK_TRUE; @@ -1123,9 +1247,9 @@ */ DoErrLog = SK_FALSE; } - } else { - /* We came from a different state */ - /* -> Set Begin Time Stamp */ + } + else { + /* We came from a different state -> Set Begin Time Stamp */ pSen->SenBegWarnTS = CurrTime; pSen->SenErrFlag = SK_SEN_ERR_WARN; } @@ -1133,7 +1257,7 @@ if (DoTrapSend) { /* Set current Time */ pSen->SenLastWarnTrapTS = CurrTime; - pSen->SenWarnCts ++; + pSen->SenWarnCts++; /* Queue PNMI Event */ SkEventQueue(pAC, SKGE_PNMI, (TooHigh ? @@ -1168,6 +1292,43 @@ /* End of check against the thresholds */ + /* Bug fix AF: 16.Aug.2001: Correct the init base + * of LM80 sensor. 
+ */ + if (pSen->SenInit == SK_SEN_DYN_INIT_PCI_IO) { + + pSen->SenInit = SK_SEN_DYN_INIT_NONE; + + if (pSen->SenValue > SK_SEN_PCI_IO_RANGE_LIMITER) { + /* 5V PCI-IO Voltage */ + pSen->SenThreWarnLow = SK_SEN_PCI_IO_5V_LOW_WARN; + pSen->SenThreErrLow = SK_SEN_PCI_IO_5V_LOW_ERR; + } + else { + /* 3.3V PCI-IO Voltage */ + pSen->SenThreWarnHigh = SK_SEN_PCI_IO_3V3_HIGH_WARN; + pSen->SenThreErrHigh = SK_SEN_PCI_IO_3V3_HIGH_ERR; + } + } + +#if 0 + /* Dynamic thresholds also for VAUX of LM80 sensor */ + if (pSen->SenInit == SK_SEN_DYN_INIT_VAUX) { + + pSen->SenInit = SK_SEN_DYN_INIT_NONE; + + /* 3.3V VAUX Voltage */ + if (pSen->SenValue > SK_SEN_VAUX_RANGE_LIMITER) { + pSen->SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN; + pSen->SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR; + } + /* 0V VAUX Voltage */ + else { + pSen->SenThreWarnHigh = SK_SEN_VAUX_0V_WARN_ERR; + pSen->SenThreErrHigh = SK_SEN_VAUX_0V_WARN_ERR; + } + } + /* * Check initialization state: * The VIO Thresholds need adaption @@ -1175,20 +1336,21 @@ if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN && pSen->SenValue > SK_SEN_WARNLOW2C && pSen->SenValue < SK_SEN_WARNHIGH2) { - pSen->SenThreErrLow = SK_SEN_ERRLOW2C; - pSen->SenThreWarnLow = SK_SEN_WARNLOW2C; + pSen->SenThreErrLow = SK_SEN_ERRLOW2C; + pSen->SenThreWarnLow = SK_SEN_WARNLOW2C; pSen->SenInit = SK_TRUE; } if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN && pSen->SenValue > SK_SEN_WARNLOW2 && pSen->SenValue < SK_SEN_WARNHIGH2C) { - pSen->SenThreErrHigh = SK_SEN_ERRHIGH2C; - pSen->SenThreWarnHigh = SK_SEN_WARNHIGH2C; + pSen->SenThreErrHigh = SK_SEN_ERRHIGH2C; + pSen->SenThreWarnHigh = SK_SEN_WARNHIGH2C; pSen->SenInit = SK_TRUE; } +#endif - if (!pSen->SenInit) { + if (pSen->SenInit != SK_SEN_DYN_INIT_NONE) { SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E013, SKERR_I2C_E013MSG); } } /* SkI2cCheckSensor*/ @@ -1210,9 +1372,13 @@ SK_EVPARA ParaLocal; int i; + /* New case: no sensors */ + if (pAC->I2c.MaxSens == 0) { + return(0); + } + switch (Event) { case SK_I2CEV_IRQ: - case SK_I2CEV_TIM: pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; ReadComplete = SkI2cReadSensor(pAC, IoC, pSen); @@ -1220,23 +1386,92 @@ /* Check sensor against defined thresholds */ SkI2cCheckSensor (pAC, pSen); - /* Increment Current and set appropriate Timeout */ - Time = SK_I2C_TIM_SHORT; + /* Increment Current sensor and set appropriate Timeout */ + pAC->I2c.CurrSens++; + if (pAC->I2c.CurrSens >= pAC->I2c.MaxSens) { + pAC->I2c.CurrSens = 0; + Time = SK_I2C_TIM_LONG; + } + else { + Time = SK_I2C_TIM_SHORT; + } + + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; - pAC->I2c.CurrSens ++; + pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + else { + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_WATCH_STATEMACHINE; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, SK_I2C_TIM_WATCH, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + break; + case SK_I2CEV_TIM: + if (pAC->I2c.TimerMode == SK_TIMER_NEW_GAUGING) { + + ParaLocal.Para64 = (SK_U64)0; + SkTimerStop(pAC, IoC, &pAC->I2c.SenTimer); + + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + ReadComplete = SkI2cReadSensor(pAC, IoC, pSen); + + if (ReadComplete) { + /* Check sensor against defined thresholds */ + SkI2cCheckSensor (pAC, pSen); + + /* Increment Current sensor and set appropriate Timeout */ + pAC->I2c.CurrSens++; + if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) { + pAC->I2c.CurrSens = 0; + Time = SK_I2C_TIM_LONG; + } + else { + Time = SK_I2C_TIM_SHORT; + } 
+ + /* Start Timer */ + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING; + + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time, + SKGE_I2C, SK_I2CEV_TIM, ParaLocal); + } + } + else { + pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens]; + pSen->SenErrFlag = SK_SEN_ERR_FAULTY; + SK_I2C_STOP(IoC); + + /* Increment Current sensor and set appropriate Timeout */ + pAC->I2c.CurrSens++; if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) { pAC->I2c.CurrSens = 0; Time = SK_I2C_TIM_LONG; } + else { + Time = SK_I2C_TIM_SHORT; + } /* Start Timer */ - ParaLocal.Para64 = (SK_U64) 0; + ParaLocal.Para64 = (SK_U64)0; + + pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING; + SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time, SKGE_I2C, SK_I2CEV_TIM, ParaLocal); } break; case SK_I2CEV_CLEAR: - for (i = 0; i < SK_MAX_SENSORS; i ++) { + for (i = 0; i < SK_MAX_SENSORS; i++) { pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK; pAC->I2c.SenTable[i].SenErrCts = 0; pAC->I2c.SenTable[i].SenWarnCts = 0; @@ -1255,4 +1490,4 @@ return(0); } /* SkI2cEvent*/ -#endif /* !SK_DIAG */ +#endif /* !SK_DIAG */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/sklm80.c linux.21pre4-ac2/drivers/net/sk98lin/sklm80.c --- linux.21pre4/drivers/net/sk98lin/sklm80.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/sklm80.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: sklm80.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.17 $ - * Date: $Date: 1999/11/22 13:35:51 $ + * Version: $Revision: 1.20 $ + * Date: $Date: 2002/08/13 09:16:27 $ * Purpose: Funktions to access Voltage and Temperature Sensor (LM80) * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,16 @@ * History: * * $Log: sklm80.c,v $ + * Revision 1.20 2002/08/13 09:16:27 rschmidt + * Changed return value for SkLm80ReadSensor() back to 'int' + * Editorial changes + * + * Revision 1.19 2002/08/06 09:43:31 jschmalz + * Extensions and changes for Yukon + * + * Revision 1.18 2002/08/02 12:26:57 rschmidt + * Editorial changes + * * Revision 1.17 1999/11/22 13:35:51 cgoos * Changed license header to GPL. 
* @@ -93,7 +102,7 @@ LM80 functions */ static const char SysKonnectFileId[] = - "$Id: sklm80.c,v 1.17 1999/11/22 13:35:51 cgoos Exp $" ; + "$Id: sklm80.c,v 1.20 2002/08/13 09:16:27 rschmidt Exp $" ; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/lm80.h" @@ -107,15 +116,15 @@ #ifdef SK_DIAG /* - * read the regeister 'reg' from the device 'device' + * read the register 'Reg' from the device 'Dev' * * return read error -1 * success the read value */ int SkLm80RcvReg( SK_IOC IoC, /* Adapter Context */ -int Dev, /* I2C device address */ -int Reg) /* register to read */ +int Dev, /* I2C device address */ +int Reg) /* register to read */ { int Val = 0; int TempExt; @@ -134,9 +143,9 @@ return(-1); } - switch(Reg) { + switch (Reg) { case LM80_TEMP_IN: - Val = (int)SkI2cRcvByte(IoC, 1) ; + Val = (int)SkI2cRcvByte(IoC, 1); /* First: correct the value: it might be negative */ if ((Val & 0x80) != 0) { @@ -145,7 +154,9 @@ } Val = Val * SK_LM80_TEMP_LSB; SkI2cStop(IoC); - TempExt = (int) SkLm80RcvReg(IoC,LM80_ADDR,LM80_TEMP_CTRL); + + TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL); + if (Val > 0) { Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB); } @@ -158,10 +169,11 @@ case LM80_VT1_IN: case LM80_VT2_IN: case LM80_VT3_IN: - Val = (int) SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB; + Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB; break; + default: - Val = (int) SkI2cRcvByte(IoC, 1); + Val = (int)SkI2cRcvByte(IoC, 1); break; } @@ -173,30 +185,32 @@ /* * read a sensors value (LM80 specific) * - * This function reads a sensors value from the I2c sensor chip LM80. The - * sensor is defined by its index into the sensors database in the struct + * This function reads a sensors value from the I2C sensor chip LM80. + * The sensor is defined by its index into the sensors database in the struct * pAC points to. * * Returns 1 if the read is completed - * 0 if the read must be continued (I2c Bus still allocated) + * 0 if the read must be continued (I2C Bus still allocated) */ -int SkLm80ReadSensor( +int SkLm80ReadSensor( SK_AC *pAC, /* Adapter Context */ -SK_IOC IoC, /* IoContext needed in level 1 and 2 */ +SK_IOC IoC, /* I/O Context needed in level 1 and 2 */ SK_SENSOR *pSen) /* Sensor to be read */ { SK_I32 Value; - switch(pSen->SenState) { + switch (pSen->SenState) { case SK_SEN_IDLE: /* Send address to ADDR register */ - SK_I2C_CTL(IoC,I2C_READ,pSen->SenDev,pSen->SenReg,0); + SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, pSen->SenReg, 0); pSen->SenState = SK_SEN_VALUE ; - BREAK_OR_WAIT(pAC, IoC, I2C_READ) ; + BREAK_OR_WAIT(pAC, IoC, I2C_READ); + case SK_SEN_VALUE: /* Read value from data register */ - SK_IN32(IoC,B2_I2C_DATA, ((SK_U32 *)&Value)); + SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value)); + Value &= 0xff; /* only least significant byte is valid */ /* Do NOT check the Value against the thresholds */ @@ -228,10 +242,9 @@ Value = Value - 256; } - /* We have a temperature sensor and need to get the signed - * extension. For now we get the extension from the last - * reading, so in the normal case we won't see flickering - * temperatures. + /* We have a temperature sensor and need to get the signed extension. + * For now we get the extension from the last reading, so in the normal + * case we won't see flickering temperatures. 
*/ pSen->SenValue = (Value * SK_LM80_TEMP_LSB) + (pSen->SenValue % SK_LM80_TEMP_LSB); @@ -240,32 +253,34 @@ SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, LM80_TEMP_CTRL, 0); pSen->SenState = SK_SEN_VALEXT ; - BREAK_OR_WAIT(pAC, IoC, I2C_READ) ; + BREAK_OR_WAIT(pAC, IoC, I2C_READ); + case SK_SEN_VALEXT: /* Read value from data register */ - SK_IN32(IoC,B2_I2C_DATA,((SK_U32 *)&Value)); + SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value)); Value &= LM80_TEMP_LSB_9; /* only bit 7 is valid */ /* cut the LSB bit */ pSen->SenValue = ((pSen->SenValue / SK_LM80_TEMP_LSB) * - SK_LM80_TEMP_LSB) ; + SK_LM80_TEMP_LSB); if (pSen->SenValue < 0) { /* Value negative: The bit value must be subtracted */ pSen->SenValue -= ((Value >> 7) * SK_LM80_TEMPEXT_LSB); - } else { + } + else { /* Value positive: The bit value must be added */ pSen->SenValue += ((Value >> 7) * SK_LM80_TEMPEXT_LSB); } pSen->SenState = SK_SEN_IDLE ; return(1); + default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E007, - SKERR_I2C_E007MSG) ; + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E007, SKERR_I2C_E007MSG); return(1); } /* Not completed */ - return(0) ; + return(0); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skproc.c linux.21pre4-ac2/drivers/net/sk98lin/skproc.c --- linux.21pre4/drivers/net/sk98lin/skproc.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skproc.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skproc.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.2.2.2 $ - * Date: $Date: 2001/03/15 12:50:13 $ + * Version: $Revision: 1.3 $ + * Date: $Date: 2002/10/02 12:59:51 $ * Purpose: Funktions to display statictic data * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,6 +28,31 @@ * History: * * $Log: skproc.c,v $ + * Revision 1.3 2002/10/02 12:59:51 mlindner + * Add: Support for Yukon + * Add: Speed check and setup + * Add: Merge source for kernel 2.2.x and 2.4.x + * Add: Read sensor names directly from VPD + * Fix: Volt values + * + * Revision 1.2.2.7 2002/01/14 12:45:15 mlindner + * Fix: Editorial changes + * + * Revision 1.2.2.6 2001/12/06 15:26:07 mlindner + * Fix: Return value of proc_read + * + * Revision 1.2.2.5 2001/12/06 09:57:39 mlindner + * New ProcFs entries + * + * Revision 1.2.2.4 2001/09/05 12:16:02 mlindner + * Add: New ProcFs entries + * Fix: Counter Errors (Jumbo == to long errors) + * Fix: Kernel error compilation + * Fix: too short counters + * + * Revision 1.2.2.3 2001/06/25 07:26:26 mlindner + * Add: More error messages + * * Revision 1.2.2.2 2001/03/15 12:50:13 mlindner * fix: ProcFS owner protection * @@ -46,36 +71,24 @@ #include "h/skdrv1st.h" #include "h/skdrv2nd.h" -#define ZEROPAD 1 /* pad with zero */ -#define SIGN 2 /* unsigned/signed long */ -#define PLUS 4 /* show plus */ -#define SPACE 8 /* space if plus */ -#define LEFT 16 /* left justified */ -//#define SPECIAL 32 /* 0x */ -#define LARGE 64 - -extern void proc_fill_inode(struct inode *inode, int fill); -extern char * SkNumber(char * str, long long num, int base, int size, - int precision ,int type); -int proc_read(char *buffer, - char **buffer_location, - off_t offset, - int buffer_length, - int *eof, - void *data); - -static const char SK_Root_Dir_entry[] = "sk98lin"; -extern struct net_device *sk98lin_root_dev; - - -struct proc_dir_entry pSkRootDir = { - 0, - sizeof(SK_Root_Dir_entry)-1, - (const char*)SK_Root_Dir_entry, - S_IFDIR | S_IRUGO, - 2, 0, 0, 0, NULL, - NULL -}; +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SPECIALX 32 /* 0x */ +#define LARGE 64 + +extern SK_AC *pACList; +extern struct net_device *SkGeRootDev; + +extern char * SkNumber( + char * str, + long long num, + int base, + int size, + int precision, + int type); /***************************************************************************** @@ -100,13 +113,14 @@ int len = 0; int t; int i; - DEV_NET *pNet; - SK_AC *pAC; - char test_buf[100]; + DEV_NET *pNet; + SK_AC *pAC; + char test_buf[100]; + char sens_msg[50]; unsigned long Flags; unsigned int Size; - struct net_device *next; - struct net_device *SkgeProcDev = sk98lin_root_dev; + struct SK_NET_DEVICE *next; + struct SK_NET_DEVICE *SkgeProcDev = SkGeRootDev; SK_PNMI_STRUCT_DATA *pPnmiStruct; SK_PNMI_STAT *pPnmiStat; @@ -122,7 +136,7 @@ for (t=pAC->GIni.GIMacsFound; t > 0; t--) { if ((pAC->GIni.GIMacsFound == 2) && pAC->RlmtNets == 1) t--; - + spin_lock_irqsave(&pAC->SlowPathLock, Flags); Size = SK_PNMI_STRUCT_SIZE; SkPnmiGetStruct(pAC, pAC->IoBase, @@ -135,151 +149,214 @@ "\nDetailed statistic for device %s\n", pAC->dev[t-1]->name); len += sprintf(buffer + len, - "==================================\n"); + "=======================================\n"); /* Board statistics */ len += sprintf(buffer + len, "\nBoard statistics\n\n"); len += sprintf(buffer + len, - "Active Port %c\n", + "Active Port %c\n", 'A' + pAC->Rlmt.Net[t-1].Port[pAC->Rlmt. 
Net[t-1].PrefPort]->PortNumber); len += sprintf(buffer + len, - "Preferred Port %c\n", + "Preferred Port %c\n", 'A' + pAC->Rlmt.Net[t-1].Port[pAC->Rlmt. Net[t-1].PrefPort]->PortNumber); len += sprintf(buffer + len, - "Bus speed (Mhz) %d\n", + "Bus speed (MHz) %d\n", pPnmiStruct->BusSpeed); len += sprintf(buffer + len, - "Bus width (Bit) %d\n", + "Bus width (Bit) %d\n", pPnmiStruct->BusWidth); - - for (i=0; i < SK_MAX_SENSORS; i ++) { - if (strcmp(pAC->I2c.SenTable[i].SenDesc, - "Temperature") == 0 ) { + len += sprintf(buffer + len, + "Hardware revision v%d.%d\n", + (pAC->GIni.GIPciHwRev >> 4) & 0x0F, + pAC->GIni.GIPciHwRev & 0x0F); + + /* Print sensor informations */ + for (i=0; i < pAC->I2c.MaxSens; i ++) { + /* Check type */ + switch (pAC->I2c.SenTable[i].SenType) { + case 1: + strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc); + strcat(sens_msg, " (C)"); len += sprintf(buffer + len, - "Temperature (C) %d.%d\n", + "%-25s %d.%02d\n", + sens_msg, pAC->I2c.SenTable[i].SenValue / 10, pAC->I2c.SenTable[i].SenValue % 10); + + strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc); + strcat(sens_msg, " (F)"); len += sprintf(buffer + len, - "Temperature (F) %d.%d\n", + "%-25s %d.%02d\n", + sens_msg, ((((pAC->I2c.SenTable[i].SenValue) *10)*9)/5 + 3200)/100, ((((pAC->I2c.SenTable[i].SenValue) *10)*9)/5 + 3200) % 10); - } else if (strcmp(pAC->I2c.SenTable[i].SenDesc, - "Speed Fan") == 0 ) { - len += sprintf(buffer + len, - "Speed Fan %d\n", - pAC->I2c.SenTable[i].SenValue); - } else { + break; + case 2: + strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc); + strcat(sens_msg, " (V)"); len += sprintf(buffer + len, - "%-20s %d.%d\n", - pAC->I2c.SenTable[i].SenDesc, + "%-25s %d.%03d\n", + sens_msg, pAC->I2c.SenTable[i].SenValue / 1000, pAC->I2c.SenTable[i].SenValue % 1000); + break; + case 3: + strcpy(sens_msg, pAC->I2c.SenTable[i].SenDesc); + strcat(sens_msg, " (rpm)"); + len += sprintf(buffer + len, + "%-25s %d\n", + sens_msg, + pAC->I2c.SenTable[i].SenValue); + break; + default: + break; } } /*Receive statistics */ - len += sprintf(buffer + len, "\nReceive statistics\n\n"); len += sprintf(buffer + len, - "Received bytes %s\n", + "Received bytes %s\n", SkNumber(test_buf, pPnmiStat->StatRxOctetsOkCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Received packets %s\n", + "Received packets %s\n", SkNumber(test_buf, pPnmiStat->StatRxOkCts, 10,0,-1,0)); +#if 0 + if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC && + pAC->HWRevision < 12) { + pPnmiStruct->InErrorsCts = pPnmiStruct->InErrorsCts - + pPnmiStat->StatRxShortsCts; + pPnmiStat->StatRxShortsCts = 0; + } +#endif + if (pNet->Mtu > 1500) + pPnmiStruct->InErrorsCts = pPnmiStruct->InErrorsCts - + pPnmiStat->StatRxTooLongCts; + len += sprintf(buffer + len, - "Received errors %s\n", - SkNumber(test_buf, pPnmiStat->StatRxFcsCts, + "Receive errors %s\n", + SkNumber(test_buf, pPnmiStruct->InErrorsCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Received dropped %s\n", + "Receive drops %s\n", SkNumber(test_buf, pPnmiStruct->RxNoBufCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Received multicast %s\n", + "Received multicast %s\n", SkNumber(test_buf, pPnmiStat->StatRxMulticastOkCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Received errors types\n"); + "Receive error types\n"); len += sprintf(buffer + len, - " length errors %s\n", + " length %s\n", SkNumber(test_buf, pPnmiStat->StatRxRuntCts, 10, 0, -1, 0)); len += sprintf(buffer + len, - " over errors %s\n", + " buffer overflow %s\n", SkNumber(test_buf, pPnmiStat->StatRxFifoOverflowCts, 10, 0, -1, 0)); len += 
sprintf(buffer + len, - " crc errors %s\n", + " bad crc %s\n", SkNumber(test_buf, pPnmiStat->StatRxFcsCts, 10, 0, -1, 0)); len += sprintf(buffer + len, - " frame errors %s\n", + " framing %s\n", SkNumber(test_buf, pPnmiStat->StatRxFramingCts, 10, 0, -1, 0)); len += sprintf(buffer + len, - " fifo errors %s\n", - SkNumber(test_buf, pPnmiStat->StatRxFifoOverflowCts, - 10, 0, -1, 0)); - len += sprintf(buffer + len, - " missed errors %s\n", + " missed frames %s\n", SkNumber(test_buf, pPnmiStat->StatRxMissedCts, 10, 0, -1, 0)); - + + if (pNet->Mtu > 1500) + pPnmiStat->StatRxTooLongCts = 0; + + len += sprintf(buffer + len, + " too long %s\n", + SkNumber(test_buf, pPnmiStat->StatRxTooLongCts, + 10, 0, -1, 0)); + len += sprintf(buffer + len, + " carrier extension %s\n", + SkNumber(test_buf, pPnmiStat->StatRxCextCts, + 10, 0, -1, 0)); + len += sprintf(buffer + len, + " too short %s\n", + SkNumber(test_buf, pPnmiStat->StatRxShortsCts, + 10, 0, -1, 0)); + len += sprintf(buffer + len, + " symbol %s\n", + SkNumber(test_buf, pPnmiStat->StatRxSymbolCts, + 10, 0, -1, 0)); + len += sprintf(buffer + len, + " LLC MAC size %s\n", + SkNumber(test_buf, pPnmiStat->StatRxIRLengthCts, + 10, 0, -1, 0)); + len += sprintf(buffer + len, + " carrier event %s\n", + SkNumber(test_buf, pPnmiStat->StatRxCarrierCts, + 10, 0, -1, 0)); + len += sprintf(buffer + len, + " jabber %s\n", + SkNumber(test_buf, pPnmiStat->StatRxJabberCts, + 10, 0, -1, 0)); + + /*Transmit statistics */ len += sprintf(buffer + len, "\nTransmit statistics\n\n"); len += sprintf(buffer + len, - "Transmit bytes %s\n", + "Transmited bytes %s\n", SkNumber(test_buf, pPnmiStat->StatTxOctetsOkCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Transmit packets %s\n", + "Transmited packets %s\n", SkNumber(test_buf, pPnmiStat->StatTxOkCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Transmit errors %s\n", + "Transmit errors %s\n", SkNumber(test_buf, pPnmiStat->StatTxSingleCollisionCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Transmit dropped %s\n", + "Transmit dropped %s\n", SkNumber(test_buf, pPnmiStruct->TxNoBufCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Transmit collisions %s\n", + "Transmit collisions %s\n", SkNumber(test_buf, pPnmiStat->StatTxSingleCollisionCts, 10,0,-1,0)); len += sprintf(buffer + len, - "Transmited errors types\n"); + "Transmit errors types\n"); len += sprintf(buffer + len, - " aborted errors %ld\n", + " excessive collision %ld\n", pAC->stats.tx_aborted_errors); len += sprintf(buffer + len, - " carrier errors %s\n", + " carrier %s\n", SkNumber(test_buf, pPnmiStat->StatTxCarrierCts, 10, 0, -1, 0)); len += sprintf(buffer + len, - " fifo errors %s\n", + " fifo underrun %s\n", SkNumber(test_buf, pPnmiStat->StatTxFifoUnderrunCts, 10, 0, -1, 0)); len += sprintf(buffer + len, - " heartbeat errors %s\n", + " heartbeat %s\n", SkNumber(test_buf, pPnmiStat->StatTxCarrierCts, 10, 0, -1, 0)); len += sprintf(buffer + len, - " window errors %ld\n", + " window %ld\n", pAC->stats.tx_window_errors); + } } SkgeProcDev = next; @@ -388,7 +465,7 @@ size--; } } - if (type & SPECIAL) { + if (type & SPECIALX) { if (base == 16) size -= 2; else if (base == 8) @@ -408,7 +485,7 @@ *str++ = ' '; if (sign) *str++ = sign; - if (type & SPECIAL) { + if (type & SPECIALX) { if (base==8) *str++ = '0'; else if (base==16) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skqueue.c linux.21pre4-ac2/drivers/net/sk98lin/skqueue.c --- linux.21pre4/drivers/net/sk98lin/skqueue.c 2003-01-29 16:26:44.000000000 +0000 +++ 
linux.21pre4-ac2/drivers/net/sk98lin/skqueue.c 2003-01-06 15:38:18.000000000 +0000 @@ -1,32 +1,23 @@ /****************************************************************************** * * Name: skqueue.c - * Project: PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.14 $ - * Date: $Date: 1998/10/15 15:11:35 $ + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.18 $ + * Date: $Date: 2002/05/07 14:11:11 $ * Purpose: Management of an event queue. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1989-1998 SysKonnect, + * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. - * All Rights Reserved * - * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF SYSKONNECT - * The copyright notice above does not evidence any - * actual or intended publication of such source code. - * - * This Module contains Proprietary Information of SysKonnect - * and should be treated as Confidential. - * - * The information in this file is provided for the exclusive use of - * the licensees of SysKonnect. - * Such users have the right to use, modify, and incorporate this code - * into products for purposes authorized by the license agreement - * provided they include this notice and the associated copyright notice - * with any such product. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ @@ -36,6 +27,18 @@ * History: * * $Log: skqueue.c,v $ + * Revision 1.18 2002/05/07 14:11:11 rwahl + * Fixed Watcom Precompiler error. + * + * Revision 1.17 2002/03/25 10:06:41 mkunz + * SkIgnoreEvent deleted + * + * Revision 1.16 2002/03/15 10:51:59 mkunz + * Added event classes for link aggregation + * + * Revision 1.15 1999/11/22 13:36:29 cgoos + * Changed license header to GPL. 
+ * * Revision 1.14 1998/10/15 15:11:35 gklug * fix: ID_sccs to SysKonnectFileId * @@ -88,7 +91,7 @@ Event queue and dispatcher */ static const char SysKonnectFileId[] = - "$Header: /usr56/projects/ge/schedule/skqueue.c,v 1.14 1998/10/15 15:11:35 gklug Exp $" ; + "$Header: /usr56/projects/ge/schedule/skqueue.c,v 1.18 2002/05/07 14:11:11 rwahl Exp $" ; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skqueue.h" /* Queue Definitions */ @@ -168,9 +171,7 @@ while (pEv != pAC->Event.EvPut) { PRINTF("dispatch Class %d Event %d\n",pEv->Class,pEv->Event) ; switch(Class = pEv->Class) { - case SKGE_DRV : /* Driver Event */ - Rtv = SkDrvEvent(pAC,Ioc,pEv->Event,pEv->Para); - break ; +#ifndef SK_USE_LAC_EV case SKGE_RLMT : /* RLMT Event */ Rtv = SkRlmtEvent(pAC,Ioc,pEv->Event,pEv->Para); break ; @@ -180,9 +181,33 @@ case SKGE_PNMI : Rtv = SkPnmiEvent(pAC,Ioc,pEv->Event,pEv->Para); break ; +#endif /* SK_USE_LAC_EV */ + case SKGE_DRV : /* Driver Event */ + Rtv = SkDrvEvent(pAC,Ioc,pEv->Event,pEv->Para); + break ; +#ifndef SK_USE_SW_TIMER case SKGE_HWAC : Rtv = SkGeSirqEvent(pAC,Ioc,pEv->Event,pEv->Para); break ; +#else /* !SK_USE_SW_TIMER */ + case SKGE_SWT : + Rtv = SkSwtEvent(pAC,Ioc,pEv->Event,pEv->Para); + break ; +#endif /* !SK_USE_SW_TIMER */ +#ifdef SK_USE_LAC_EV + case SKGE_LACP : + Rtv = SkLacpEvent(pAC,Ioc,pEv->Event,pEv->Para); + break ; + case SKGE_RSF : + Rtv = SkRsfEvent(pAC,Ioc,pEv->Event,pEv->Para); + break ; + case SKGE_MARKER : + Rtv = SkMarkerEvent(pAC,Ioc,pEv->Event,pEv->Para); + break ; + case SKGE_FD : + Rtv = SkFdEvent(pAC,Ioc,pEv->Event,pEv->Para); + break ; +#endif /* SK_USE_LAC_EV */ #ifdef SK_USE_CSUM case SKGE_CSUM : Rtv = SkCsEvent(pAC,Ioc,pEv->Event,pEv->Para); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skrlmt.c linux.21pre4-ac2/drivers/net/sk98lin/skrlmt.c --- linux.21pre4/drivers/net/sk98lin/skrlmt.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skrlmt.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,8 +2,8 @@ * * Name: skrlmt.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.61 $ - * Date: $Date: 2001/03/14 12:52:08 $ + * Version: $Revision: 1.65 $ + * Date: $Date: 2002/07/22 14:29:48 $ * Purpose: Manage links on SK-NET Adapters, esp. redundant ones. * ******************************************************************************/ @@ -26,6 +26,23 @@ * History: * * $Log: skrlmt.c,v $ + * Revision 1.65 2002/07/22 14:29:48 rwahl + * - Removed BRK statement from debug check. + * + * Revision 1.64 2001/11/28 19:36:14 rwahl + * - RLMT Packets sent to an invalid MAC address in CLP/CLPSS mode + * (#10650). + * - Reworked fix for port switching in CLS mode (#10639) + * (no dependency to RLMT module). + * - Enabled dbg output for entry/exit of event functions. + * - Editorial changes. + * + * Revision 1.63 2001/10/26 07:53:18 afischer + * Port switching bug in `check local link` mode + * + * Revision 1.62 2001/07/03 12:16:30 mkunz + * New Flag ChgBcPrio (Change priority of last broadcast received) + * * Revision 1.61 2001/03/14 12:52:08 rassmann * Fixed reporting of active port up/down to PNMI. 
* @@ -255,7 +272,7 @@ #ifndef lint static const char SysKonnectFileId[] = - "@(#) $Id: skrlmt.c,v 1.61 2001/03/14 12:52:08 rassmann Exp $ (C) SysKonnect."; + "@(#) $Id: skrlmt.c,v 1.65 2002/07/22 14:29:48 rwahl Exp $ (C) SysKonnect."; #endif /* !defined(lint) */ #define __SKRLMT_C @@ -585,8 +602,8 @@ for (i = 0; i < SK_MAX_NETS; i++) { pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT; pAC->Rlmt.Net[i].RootIdSet = SK_FALSE; - pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* Automatic. */ pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT; + pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* Automatic. */ /* Just assuming. */ pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort; pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE; @@ -714,7 +731,7 @@ FirstMacUp = pAC->Rlmt.Net[NetIdx].Port[i]; } else { - pAC->Rlmt.Net[NetIdx].Port[i]->PortCheck[ + PrevMacUp->PortCheck[ pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked].CheckAddr = pAC->Rlmt.Net[NetIdx].Port[i]->AddrPort->CurrentMacAddress; PrevMacUp->PortCheck[ @@ -737,7 +754,7 @@ #ifdef DEBUG for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) { SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("Port %d checks %d other ports: %2X.\n", NetIdx, + ("Port %d checks %d other ports: %2X.\n", i, pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked, pAC->Rlmt.Net[NetIdx].Port[i]->PortCheck[0].CheckAddr.a[5])) } @@ -773,6 +790,22 @@ SK_MBUF *pMb; SK_RLMT_PACKET *pPacket; +#ifdef DEBUG + SK_U8 CheckSrc = 0; + SK_U8 CheckDest = 0; + + for (i = 0; i < SK_MAC_ADDR_LEN; ++i) { + CheckSrc |= SrcAddr->a[i]; + CheckDest |= DestAddr->a[i]; + } + + if ((CheckSrc == 0) || (CheckDest == 0)) { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_ERR, + ("SkRlmtBuildPacket: Invalid %s%saddr.\n", + (CheckSrc == 0 ? "Src" : ""), (CheckDest == 0 ? "Dest" : ""))) + } +#endif + if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) != NULL) { pPacket = (SK_RLMT_PACKET*)pMb->pData; for (i = 0; i < SK_MAC_ADDR_LEN; i++) { @@ -1504,7 +1537,7 @@ /* Select port with the latest TimeStamp. */ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { -#ifdef xDEBUG +#ifdef DEBUG SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("TimeStamp Port %d: %08x %08x.\n", i, @@ -1521,10 +1554,8 @@ } if (PortFound) { -#if 0 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("Port %d received the last broadcast.\n", *pSelect)) -#endif /* 0 */ /* Look if another port's time stamp is similar. */ for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) { @@ -1537,7 +1568,7 @@ pAC->Rlmt.Port[i].BcTimeStamp + SK_RLMT_BC_DELTA > BcTimeStamp)) { PortFound = SK_FALSE; -#ifdef xDEBUG +#ifdef DEBUG SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("Port %d received a broadcast at a similar time.\n", i)) #endif /* DEBUG */ @@ -1546,10 +1577,11 @@ } } -#ifdef xDEBUG +#ifdef DEBUG if (PortFound) { SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_CHECK_SWITCH found Port %d receiving the substantially latest broadcast (%d).\n", + ("SK_RLMT_CHECK_SWITCH found Port %d receiving the substantially " + "latest broadcast (%d).\n", *pSelect, BcTimeStamp - pAC->Rlmt.Port[1 - *pSelect].BcTimeStamp)) } @@ -1794,6 +1826,13 @@ PortFound = SK_FALSE; pAC->Rlmt.CheckSwitch = SK_FALSE; +#if 0 /* RW 2001/10/18 - active port becomes always prefered one */ + if (pAC->Rlmt.Net[NetIdx].Preference == 0xFFFFFFFF) { /* Automatic */ + /* disable auto-fail back */ + PrefPort = Active; + } +#endif + if (pAC->Rlmt.Net[NetIdx].LinksUp == 0) { /* Last link went down - shut down the net. 
*/ pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_DOWN; @@ -1882,7 +1921,10 @@ * else * SwitchSoft */ - if (pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) { + /* check of ChgBcPrio flag added */ + if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) && + (!pAC->Rlmt.Net[0].ChgBcPrio)) { + if (!PortFound) { PortFound = SkRlmtSelectBcRx( pAC, IoC, Active, PrefPort, &Para.Para32[1]); @@ -1894,6 +1936,20 @@ } } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */ + /* with changed priority for last broadcast received */ + if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) && + (pAC->Rlmt.Net[0].ChgBcPrio)) { + if (!PortFound) { + PortFound = SkRlmtSelectNotSuspect( + pAC, IoC, Active, PrefPort, &Para.Para32[1]); + } + + if (!PortFound) { + PortFound = SkRlmtSelectBcRx( + pAC, IoC, Active, PrefPort, &Para.Para32[1]); + } + } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */ + if (!PortFound) { PortFound = SkRlmtSelectUp( pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS); @@ -1927,6 +1983,7 @@ } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */ if (PortFound) { + if (Para.Para32[1] != Active) { SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("Active: %d, Para1: %d.\n", Active, Para.Para32[1])) @@ -2698,10 +2755,8 @@ SK_U32 PortNumber; SK_U32 i; -#if 0 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("SK_RLMT_TIM Event BEGIN.\n")) -#endif /* 0 */ if (Para.Para32[1] != (SK_U32)-1) { SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, @@ -2778,10 +2833,8 @@ SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG; } -#if 0 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("SK_RLMT_TIM Event END.\n")) -#endif /* 0 */ } /* SkRlmtEvtTim */ @@ -2804,7 +2857,7 @@ SK_IOC IoC, /* I/O Context */ SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ { -#ifdef XDEBUG +#ifdef xDEBUG int j; #endif /* DEBUG */ @@ -2820,7 +2873,7 @@ } #ifdef xDEBUG - for (j = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; j++) { + for (j = 0; j < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; j++) { SK_ADDR_PORT *pAPort; SK_U32 k; SK_U16 *InAddr; @@ -2842,7 +2895,7 @@ pAPort->Exact[k].a[4], pAPort->Exact[k].a[5])) } } -#endif /* DEBUG */ +#endif /* xDEBUG */ SkRlmtCheckSeg(pAC, IoC, Para.Para32[0]); @@ -2874,10 +2927,9 @@ SK_MBUF *pNextMb; SK_U32 NetNumber; -#if 0 + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("SK_RLMT_PACKET_RECEIVED Event BEGIN.\n")) -#endif /* 0 */ /* Should we ignore frames during port switching? */ @@ -2905,10 +2957,8 @@ } } -#if 0 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("SK_RLMT_PACKET_RECEIVED Event END.\n")) -#endif /* 0 */ } /* SkRlmtEvtPacketRx */ @@ -2987,6 +3037,9 @@ SK_IOC IoC, /* I/O Context */ SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */ { + SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, + ("SK_RLMT_STATS_UPDATE Event BEGIN.\n")) + if (Para.Para32[1] != (SK_U32)-1) { SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("Bad Parameter.\n")) @@ -3003,15 +3056,10 @@ return; } -#if 0 - SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, - ("SK_RLMT_STATS_UPDATE Event BEGIN.\n")) - /* Update statistics - currently always up-to-date. 
*/ SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("SK_RLMT_STATS_UPDATE Event END.\n")) -#endif /* 0 */ } /* SkRlmtEvtStatsUpdate */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/sktimer.c linux.21pre4-ac2/drivers/net/sk98lin/sktimer.c --- linux.21pre4/drivers/net/sk98lin/sktimer.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/sktimer.c 2003-01-06 15:38:18.000000000 +0000 @@ -1,32 +1,23 @@ /****************************************************************************** * * Name: sktimer.c - * Project: PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.11 $ - * Date: $Date: 1998/12/17 13:24:13 $ + * Project: GEnesis, PCI Gigabit Ethernet Adapter + * Version: $Revision: 1.12 $ + * Date: $Date: 1999/11/22 13:38:51 $ * Purpose: High level timer functions. * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1989-1998 SysKonnect, + * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. - * All Rights Reserved * - * THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF SYSKONNECT - * The copyright notice above does not evidence any - * actual or intended publication of such source code. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. * - * This Module contains Proprietary Information of SysKonnect - * and should be treated as Confidential. - * - * The information in this file is provided for the exclusive use of - * the licensees of SysKonnect. - * Such users have the right to use, modify, and incorporate this code - * into products for purposes authorized by the license agreement - * provided they include this notice and the associated copyright notice - * with any such product. * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ @@ -36,6 +27,9 @@ * History: * * $Log: sktimer.c,v $ + * Revision 1.12 1999/11/22 13:38:51 cgoos + * Changed license header to GPL. + * * Revision 1.11 1998/12/17 13:24:13 gklug * fix: restart problem: do NOT destroy timer queue if init 1 is done * @@ -82,7 +76,7 @@ Event queue and dispatcher */ static const char SysKonnectFileId[] = - "$Header: /usr56/projects/ge/schedule/sktimer.c,v 1.11 1998/12/17 13:24:13 gklug Exp $" ; + "$Header: /usr56/projects/ge/schedule/sktimer.c,v 1.12 1999/11/22 13:38:51 cgoos Exp $" ; #include "h/skdrv1st.h" /* Driver Specific Definitions */ #include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. 
*/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skvpd.c linux.21pre4-ac2/drivers/net/sk98lin/skvpd.c --- linux.21pre4/drivers/net/sk98lin/skvpd.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skvpd.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,16 +2,15 @@ * * Name: skvpd.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.26 $ - * Date: $Date: 2000/06/13 08:00:01 $ + * Version: $Revision: 1.32 $ + * Date: $Date: 2002/10/14 16:04:29 $ * Purpose: Shared software to read and write VPD data * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998,1999 SysKonnect, - * a business unit of Schneider & Koch & Co. Datensysteme GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -27,6 +26,32 @@ * History: * * $Log: skvpd.c,v $ + * Revision 1.32 2002/10/14 16:04:29 rschmidt + * Added saving of VPD ROM Size from PCI_OUR_REG_2 + * Avoid reading of PCI_OUR_REG_2 in VpdTransferBlock() + * Editorial changes + * + * Revision 1.31 2002/09/10 09:21:32 mkarl + * Replaced all if(GIChipId == CHIP_ID_GENESIS) with new entry GIGenesis + * + * Revision 1.30 2002/09/09 14:43:03 mkarl + * changes for diagnostics in order to read VPD data before the adapter + * has been initialized + * editorial changes + * + * Revision 1.29 2002/07/26 13:20:43 mkarl + * added Yukon support + * save size of VPD in pAC->vpd.vpd_size + * + * Revision 1.28 2002/04/02 15:31:47 afischer + * Bug fix in VpdWait() + * + * Revision 1.27 2000/08/10 11:29:06 rassmann + * Editorial changes. + * Preserving 32-bit alignment in structs for the adapter context. + * Removed unused function VpdWriteDword() (#if 0). + * Made VpdReadKeyword() available for SKDIAG only. + * * Revision 1.26 2000/06/13 08:00:01 mkarl * additional cast to avoid compile problems in 64 bit environment * @@ -66,7 +91,7 @@ * * Revision 1.14 1998/10/28 07:20:38 gklug * chg: Interface functions to use IoC as parameter as well - * fix: VpdRead/WriteDWord now return SK_U32 + * fix: VpdRead/WriteDWord now returns SK_U32 * chg: VPD_IN/OUT names conform to SK_IN/OUT * add: usage of VPD_IN/OUT8 macros * add: VpdRead/Write Stream functions to r/w a stream of data @@ -88,7 +113,7 @@ * Revision 1.9 1998/09/16 07:33:52 malthoff * remove memcmp() by SK_MEMCMP and * memcpy() by SK_MEMCPY() to be - * independant from the 'C' Standard Library. + * independent from the 'C' Standard Library. * * Revision 1.8 1998/08/19 12:52:35 malthoff * compiler fix: use SK_VPD_KEY instead of S_VPD. 
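Revision 1.28 above ("Bug fix in VpdWait()") deserves a note: the old timeout path issued VPD_STOP(), which, as the comment added in the next hunk explains, can turn a pending VPD read into a write and clobber read-only VPD data, so the fix now only reports the timeout. A condensed sketch of the resulting wait loop, with the debug logging stripped; start_time, state and event are as declared in the full function below:

	start_time = SkOsGetTime(pAC);
	do {
		/* give up after SK_TICKS_PER_SEC/16 (about 62 ms), but do NOT
		 * issue VPD_STOP() here - a pending read must not be
		 * converted into a write */
		if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC/16)
			return(1);

		/* PCI_VPD_FLAG changes once the requested transfer is done */
		VPD_IN16(pAC, IoC, PCI_VPD_ADR_REG, &state);
	} while ((int)(state & PCI_VPD_FLAG) == event);

	return(0);
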
@@ -124,7 +149,7 @@ Please refer skvpd.txt for infomation how to include this module */ static const char SysKonnectFileId[] = - "@(#)$Id: skvpd.c,v 1.26 2000/06/13 08:00:01 mkarl Exp $ (C) SK" ; + "@(#)$Id: skvpd.c,v 1.32 2002/10/14 16:04:29 rschmidt Exp $ (C) SK"; #include "h/skdrv1st.h" #include "h/sktypes.h" @@ -137,46 +162,58 @@ #ifndef SK_KR_PROTO static SK_VPD_PARA *vpd_find_para( SK_AC *pAC, - char *key, - SK_VPD_PARA *p) ; + char *key, + SK_VPD_PARA *p); #else /* SK_KR_PROTO */ -static SK_VPD_PARA *vpd_find_para() ; +static SK_VPD_PARA *vpd_find_para(); #endif /* SK_KR_PROTO */ /* - * waits for a completetion of a VPD transfer + * waits for a completion of a VPD transfer * The VPD transfer must complete within SK_TICKS_PER_SEC/16 * * returns 0: success, transfer completes * error exit(9) with a error message */ static int VpdWait( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC, /* IO Context */ +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ int event) /* event to wait for (VPD_READ / VPD_write) completion*/ { - SK_U64 start_time ; - SK_U16 state ; + SK_U64 start_time; + SK_U16 state; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd wait for %s\n",event?"Write":"Read")) ; - start_time = SkOsGetTime(pAC) ; + SK_DBG_MSG(pAC,SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD wait for %s\n", event?"Write":"Read")); + start_time = SkOsGetTime(pAC); do { if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC/16) { - VPD_STOP(pAC,IoC) ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD, - SK_DBGCAT_FATAL|SK_DBGCAT_ERR, - ("ERROR:vpd wait timeout\n")) ; - return(1) ; - } - VPD_IN16(pAC,IoC,PCI_VPD_ADR_REG,&state) ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("state = %x, event %x\n",state,event)) ; - } while((int)(state & PCI_VPD_FLAG) == event) ; - return(0) ; + /* Bug fix AF: Thu Mar 28 2002 + * Do not call: VPD_STOP(pAC, IoC); + * A pending VPD read cycle can not be aborted by writing + * VPD_WRITE to the PCI_VPD_ADR_REG (VPD address register). + * Although the write threshold in the OUR-register protects + * VPD read only space from being overwritten this does not + * protect a VPD read from being `converted` into a VPD write + * operation (on the fly). As a consequence the VPD_STOP would + * delete VPD read only data. In case of any problems with the + * I2C bus we exit the loop here. The I2C read operation can + * not be aborted except by a reset (->LR). + */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_FATAL | SK_DBGCAT_ERR, + ("ERROR:VPD wait timeout\n")); + return(1); + } + VPD_IN16(pAC, IoC, PCI_VPD_ADR_REG, &state); + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("state = %x, event %x\n",state,event)); + } while((int)(state & PCI_VPD_FLAG) == event); + + return(0); } +#ifdef SKDIAG /* * Read the dword at address 'addr' from the VPD EEPROM. @@ -188,32 +225,37 @@ * * Returns the data read. 
*/ -SK_U32 VpdReadDWord( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC, /* IO Context */ +SK_U32 VpdReadDWord( +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ int addr) /* VPD address */ { - SK_U32 Rtv ; + SK_U32 Rtv; /* start VPD read */ - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd read dword at 0x%x\n",addr)) ; - addr &= ~VPD_WRITE ; /* ensure the R/W bit is set to read */ + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD read dword at 0x%x\n",addr)); + addr &= ~VPD_WRITE; /* ensure the R/W bit is set to read */ - VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG, (SK_U16) addr) ; + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)addr); /* ignore return code here */ - (void)VpdWait(pAC,IoC,VPD_READ) ; + (void)VpdWait(pAC, IoC, VPD_READ); /* Don't swap here, it's a data stream of bytes */ - Rtv = 0 ; + Rtv = 0; - VPD_IN32(pAC,IoC,PCI_VPD_DAT_REG,&Rtv) ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd read dword data = 0x%x\n",Rtv)) ; - return (Rtv) ; + VPD_IN32(pAC, IoC, PCI_VPD_DAT_REG, &Rtv); + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD read dword data = 0x%x\n",Rtv)); + return(Rtv); } +#endif /* SKDIAG */ + +#if 0 + /* Write the dword 'data' at address 'addr' into the VPD EEPROM, and verify that the data is written. @@ -233,43 +275,43 @@ Returns 0: success - 1: error, I2C transfer does not terminate - 2: error, data verify error + 1: error, I2C transfer does not terminate + 2: error, data verify error */ -#if 0 /* Unused at the moment */ -static int VpdWriteDWord( -SK_AC *pAC, /* pAC pointer */ -SK_IOC IoC, /* IO Context */ +static int VpdWriteDWord( +SK_AC *pAC, /* pAC pointer */ +SK_IOC IoC, /* IO Context */ int addr, /* VPD address */ -SK_U32 data) /* VPD data to write */ +SK_U32 data) /* VPD data to write */ { /* start VPD write */ /* Don't swap here, it's a data stream of bytes */ - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd write dword at addr 0x%x, data = 0x%x\n",addr,data)) ; - VPD_OUT32(pAC,IoC,PCI_VPD_DAT_REG, (SK_U32)data) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data)); + VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data); /* But do it here */ - addr |= VPD_WRITE ; + addr |= VPD_WRITE; - VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE)) ; + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE)); /* this may take up to 10,6 ms */ - if (VpdWait(pAC,IoC,VPD_WRITE)) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("Write Timed Out\n")) ; - return(1) ; - } ; + if (VpdWait(pAC, IoC, VPD_WRITE)) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Write Timed Out\n")); + return(1); + }; /* verify data */ - if (VpdReadDWord(pAC,IoC,addr) != data) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR|SK_DBGCAT_FATAL, - ("Data Verify Error\n")) ; - return(2) ; + if (VpdReadDWord(pAC, IoC, addr) != data) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Data Verify Error\n")); + return(2); } - return(0) ; -} -#endif + return(0); +} /* VpdWriteDWord */ + +#endif /* 0 */ /* * Read one Stream of 'len' bytes of VPD data, starting at 'addr' from @@ -278,88 +320,85 @@ * Returns number of bytes read / written. 
*/ static int VpdWriteStream( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC, /* IO Context */ -char *buf, /* data buffer */ +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +char *buf, /* data buffer */ int Addr, /* VPD start address */ int Len) /* number of bytes to read / to write */ { - int i ; - int j ; - SK_U16 AdrReg ; - int Rtv ; - SK_U8 * pComp; /* Compare pointer */ - SK_U8 Data ; /* Input Data for Compare */ + int i; + int j; + SK_U16 AdrReg; + int Rtv; + SK_U8 * pComp; /* Compare pointer */ + SK_U8 Data; /* Input Data for Compare */ /* Init Compare Pointer */ pComp = (SK_U8 *) buf; - for (i=0; i < Len; i ++, buf++) { + for (i = 0; i < Len; i++, buf++) { if ((i%sizeof(SK_U32)) == 0) { /* * At the begin of each cycle read the Data Reg * So it is initialized even if only a few bytes * are written. */ - AdrReg = (SK_U16) Addr ; - AdrReg &= ~VPD_WRITE ; /* READ operation */ + AdrReg = (SK_U16) Addr; + AdrReg &= ~VPD_WRITE; /* READ operation */ - VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG, AdrReg) ; + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); - /* ignore return code here */ - Rtv = VpdWait(pAC,IoC,VPD_READ) ; + /* Wait for termination */ + Rtv = VpdWait(pAC, IoC, VPD_READ); if (Rtv != 0) { - return(i) ; + return(i); } } /* Write current Byte */ - VPD_OUT8(pAC,IoC,PCI_VPD_DAT_REG+(i%sizeof(SK_U32)), - *(SK_U8*)buf) ; + VPD_OUT8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)), + *(SK_U8*)buf); if (((i%sizeof(SK_U32)) == 3) || (i == (Len - 1))) { /* New Address needs to be written to VPD_ADDR reg */ - AdrReg = (SK_U16) Addr ; + AdrReg = (SK_U16) Addr; Addr += sizeof(SK_U32); - AdrReg |= VPD_WRITE ; /* WRITE operation */ + AdrReg |= VPD_WRITE; /* WRITE operation */ - VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG, AdrReg) ; + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); /* Wait for termination */ - Rtv = VpdWait(pAC,IoC,VPD_WRITE) ; + Rtv = VpdWait(pAC, IoC, VPD_WRITE); if (Rtv != 0) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("Write Timed Out\n")) ; - return(i - (i%sizeof(SK_U32))) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Write Timed Out\n")); + return(i - (i%sizeof(SK_U32))); } /* * Now re-read to verify */ - AdrReg &= ~VPD_WRITE ; /* READ operation */ + AdrReg &= ~VPD_WRITE; /* READ operation */ - VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG, AdrReg) ; + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); /* Wait for termination */ - Rtv = VpdWait(pAC,IoC,VPD_READ) ; + Rtv = VpdWait(pAC, IoC, VPD_READ); if (Rtv != 0) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("Verify Timed Out\n")) ; - return(i - (i%sizeof(SK_U32))) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Verify Timed Out\n")); + return(i - (i%sizeof(SK_U32))); } - for (j = 0; j <= (int) (i%sizeof(SK_U32)); - j ++, pComp ++ ) { - VPD_IN8(pAC,IoC,PCI_VPD_DAT_REG+j, &Data) ; + for (j = 0; j <= (int)(i%sizeof(SK_U32)); j++, pComp++) { + VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + j, &Data); if (Data != *pComp) { /* Verify Error */ - SK_DBG_MSG(pAC,SK_DBGMOD_VPD, - SK_DBGCAT_ERR, + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("WriteStream Verify Error\n")); return(i - (i%sizeof(SK_U32)) + j); } } - } } @@ -374,85 +413,85 @@ * Returns number of bytes read / written. 
*/ static int VpdReadStream( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC, /* IO Context */ -char *buf, /* data buffer */ +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +char *buf, /* data buffer */ int Addr, /* VPD start address */ int Len) /* number of bytes to read / to write */ { - int i ; - SK_U16 AdrReg ; - int Rtv ; + int i; + SK_U16 AdrReg; + int Rtv; - for (i=0; i < Len; i ++, buf++) { + for (i = 0; i < Len; i++, buf++) { if ((i%sizeof(SK_U32)) == 0) { /* New Address needs to be written to VPD_ADDR reg */ - AdrReg = (SK_U16) Addr ; + AdrReg = (SK_U16) Addr; Addr += sizeof(SK_U32); - AdrReg &= ~VPD_WRITE ; /* READ operation */ + AdrReg &= ~VPD_WRITE; /* READ operation */ - VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG, AdrReg) ; + VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg); - /* ignore return code here */ - Rtv = VpdWait(pAC,IoC,VPD_READ) ; + /* Wait for termination */ + Rtv = VpdWait(pAC, IoC, VPD_READ); if (Rtv != 0) { - return(i) ; + return(i); } - } - VPD_IN8(pAC,IoC,PCI_VPD_DAT_REG+(i%sizeof(SK_U32)), - (SK_U8 *)buf) ; + VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)), + (SK_U8 *)buf); } - return(Len) ; + return(Len); } /* - * Read ore wirtes 'len' bytes of VPD data, starting at 'addr' from + * Read ore writes 'len' bytes of VPD data, starting at 'addr' from * or to the I2C EEPROM. * * Returns number of bytes read / written. */ static int VpdTransferBlock( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC, /* IO Context */ -char *buf, /* data buffer */ +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC, /* IO Context */ +char *buf, /* data buffer */ int addr, /* VPD start address */ int len, /* number of bytes to read / to write */ int dir) /* transfer direction may be VPD_READ or VPD_WRITE */ { - int Rtv ; /* Return value */ - int vpd_rom_size ; - SK_U32 our_reg2 ; - - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd %s block, addr = 0x%x, len = %d\n", - dir?"write":"read",addr,len)) ; + int Rtv; /* Return value */ + int vpd_rom_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD %s block, addr = 0x%x, len = %d\n", + dir ? "write" : "read", addr, len)); if (len == 0) - return (0) ; + return(0); - VPD_IN32(pAC,IoC,PCI_OUR_REG_2,&our_reg2) ; - vpd_rom_size = 256 << ((our_reg2 & PCI_VPD_ROM_SZ) >> 14); + vpd_rom_size = pAC->vpd.rom_size; + if (addr > vpd_rom_size - 4) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR|SK_DBGCAT_FATAL, + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, ("Address error: 0x%x, exp. < 0x%x\n", - addr, vpd_rom_size - 4)) ; - return (0) ; + addr, vpd_rom_size - 4)); + return(0); } + if (addr + len > vpd_rom_size) { - len = vpd_rom_size - addr ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("Warning: len was cut to %d\n",len)) ; + len = vpd_rom_size - addr; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Warning: len was cut to %d\n", len)); } if (dir == VPD_READ) { Rtv = VpdReadStream(pAC, IoC, buf, addr, len); - } else { + } + else { Rtv = VpdWriteStream(pAC, IoC, buf, addr, len); } - return (Rtv) ; + return(Rtv); } #ifdef SKDIAG @@ -463,13 +502,13 @@ * Returns number of bytes read. 
*/ int VpdReadBlock( -SK_AC *pAC, /* pAC pointer */ -SK_IOC IoC, /* IO Context */ -char *buf, /* buffer were the data should be stored */ +SK_AC *pAC, /* pAC pointer */ +SK_IOC IoC, /* IO Context */ +char *buf, /* buffer were the data should be stored */ int addr, /* start reading at the VPD address */ int len) /* number of bytes to read */ { - return (VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_READ)) ; + return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_READ)); } /* @@ -478,13 +517,13 @@ * Returns number of bytes writes. */ int VpdWriteBlock( -SK_AC *pAC, /* pAC pointer */ -SK_IOC IoC, /* IO Context */ -char *buf, /* buffer, holds the data to write */ +SK_AC *pAC, /* pAC pointer */ +SK_IOC IoC, /* IO Context */ +char *buf, /* buffer, holds the data to write */ int addr, /* start writing at the VPD address */ int len) /* number of bytes to write */ { - return (VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_WRITE)) ; + return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_WRITE)); } #endif /* SKDIAG */ @@ -498,72 +537,112 @@ * 1: fatal VPD error */ static int VpdInit( -SK_AC *pAC, /* Adapters context */ -SK_IOC IoC) /* IO Context */ +SK_AC *pAC, /* Adapters context */ +SK_IOC IoC) /* IO Context */ { - SK_VPD_PARA *r, rp ; /* RW or RV */ - int i ; - unsigned char x ; + SK_VPD_PARA *r, rp; /* RW or RV */ + int i; + unsigned char x; + int vpd_size; + SK_U16 word; + SK_U32 our_reg2; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, ("VpdInit .. ")); + + VPD_IN16(pAC, IoC, PCI_DEVICE_ID, &word); + + VPD_IN32(pAC, IoC, PCI_OUR_REG_2, &our_reg2); + + pAC->vpd.rom_size = 256 << ((our_reg2 & PCI_VPD_ROM_SZ) >> 14); + + /* + * this function might get used before the hardware is initialized + * therefore we cannot always trust in GIChipId + */ + if (((pAC->vpd.v.vpd_status & VPD_VALID) == 0 && + word == VPD_PCI_ID_YUKON) || + ((pAC->vpd.v.vpd_status & VPD_VALID) != 0 && + !(pAC->GIni.GIGenesis))) { + + /* for Yukon the VPD size is always 256 */ + vpd_size = VPD_SIZE_YUKON; + } + else { + /* Genesis uses the maximum ROM size up to 512 for VPD */ + if (pAC->vpd.rom_size > VPD_SIZE_GENESIS) { + vpd_size = VPD_SIZE_GENESIS; + } + else { + vpd_size = pAC->vpd.rom_size; + } + } - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_INIT,("VpdInit .. 
")) ; /* read the VPD data into the VPD buffer */ - if (VpdTransferBlock(pAC,IoC,pAC->vpd.vpd_buf,0,VPD_SIZE,VPD_READ) - != VPD_SIZE) { + if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf, 0, vpd_size, VPD_READ) + != vpd_size) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("Block Read Error\n")) ; - return(1) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("Block Read Error\n")); + return(1); } + + pAC->vpd.vpd_size = vpd_size; /* find the end tag of the RO area */ - if (!(r = vpd_find_para(pAC,VPD_RV,&rp))) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Encoding Error: RV Tag not found\n")) ; - return (1) ; + if (!(r = vpd_find_para(pAC, VPD_RV, &rp))) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: RV Tag not found\n")); + return(1); } - if (r->p_val + r->p_len > pAC->vpd.vpd_buf + VPD_SIZE/2) { + + if (r->p_val + r->p_len > pAC->vpd.vpd_buf + vpd_size/2) { SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Encoding Error: Invalid VPD struct size\n")) ; - return (1) ; + ("Encoding Error: Invalid VPD struct size\n")); + return(1); } - pAC->vpd.v.vpd_free_ro = r->p_len - 1 ; + pAC->vpd.v.vpd_free_ro = r->p_len - 1; /* test the checksum */ - for (i = 0, x = 0; (unsigned)i<=(unsigned)VPD_SIZE/2 - r->p_len; i++) { - x += pAC->vpd.vpd_buf[i] ; + for (i = 0, x = 0; (unsigned)i<=(unsigned)vpd_size/2 - r->p_len; i++) { + x += pAC->vpd.vpd_buf[i]; } + if (x != 0) { /* checksum error */ - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("VPD Checksum Error\n")) ; - return (1) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("VPD Checksum Error\n")); + return(1); } /* find and check the end tag of the RW area */ - if (!(r = vpd_find_para(pAC,VPD_RW,&rp))) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Encoding Error: RV Tag not found\n")) ; - return (1) ; + if (!(r = vpd_find_para(pAC, VPD_RW, &rp))) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: RV Tag not found\n")); + return(1); } - if (r->p_val < pAC->vpd.vpd_buf + VPD_SIZE/2) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Encoding Error: Invalid VPD struct size\n")) ; - return (1) ; + + if (r->p_val < pAC->vpd.vpd_buf + vpd_size/2) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Encoding Error: Invalid VPD struct size\n")); + return(1); } - pAC->vpd.v.vpd_free_rw = r->p_len ; + pAC->vpd.v.vpd_free_rw = r->p_len; /* everything seems to be ok */ - pAC->vpd.v.vpd_status |= VPD_VALID ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_INIT, + if (pAC->GIni.GIChipId != 0) { + pAC->vpd.v.vpd_status |= VPD_VALID; + } + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, ("done. Free RO = %d, Free RW = %d\n", - pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw)) ; + pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw)); - return(0) ; + return(0); } /* * find the Keyword 'key' in the VPD buffer and fills the - * parameter sturct 'p' with it's values + * parameter struct 'p' with it's values * * returns *p success * 0: parameter was not found or VPD encoding error @@ -573,63 +652,64 @@ char *key, /* keyword to find (e.g. 
"MN") */ SK_VPD_PARA *p) /* parameter description struct */ { - char *v ; /* points to vpd buffer */ - int max ; /* Maximum Number of Iterations */ + char *v ; /* points to VPD buffer */ + int max; /* Maximum Number of Iterations */ - v = pAC->vpd.vpd_buf ; - max = 128 ; + v = pAC->vpd.vpd_buf; + max = 128; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd find para %s .. ",key)) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD find para %s .. ",key)); /* check mandatory resource type ID string (Product Name) */ - if (*v != (char) RES_ID) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Error: 0x%x missing\n",RES_ID)) ; - return (0) ; - } - - if (strcmp(key,VPD_NAME) == 0) { - p->p_len = VPD_GET_RES_LEN(v) ; - p->p_val = VPD_GET_VAL(v) ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("found, len = %d\n",p->p_len)) ; - return(p) ; + if (*v != (char)RES_ID) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Error: 0x%x missing\n", RES_ID)); + return(0); + } + + if (strcmp(key, VPD_NAME) == 0) { + p->p_len = VPD_GET_RES_LEN(v); + p->p_val = VPD_GET_VAL(v); + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("found, len = %d\n", p->p_len)); + return(p); } - v += 3 + VPD_GET_RES_LEN(v) + 3 ; - for ( ; ; ) { + v += 3 + VPD_GET_RES_LEN(v) + 3; + for (;; ) { if (SK_MEMCMP(key,v,2) == 0) { - p->p_len = VPD_GET_VPD_LEN(v) ; - p->p_val = VPD_GET_VAL(v) ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("found, len = %d\n",p->p_len)) ; - return (p) ; + p->p_len = VPD_GET_VPD_LEN(v); + p->p_val = VPD_GET_VAL(v); + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("found, len = %d\n",p->p_len)); + return(p); } /* exit when reaching the "RW" Tag or the maximum of itera. */ - max-- ; + max--; if (SK_MEMCMP(VPD_RW,v,2) == 0 || max == 0) { - break ; + break; } if (SK_MEMCMP(VPD_RV,v,2) == 0) { - v += 3 + VPD_GET_VPD_LEN(v) + 3 ; /* skip VPD-W */ - } else { - v += 3 + VPD_GET_VPD_LEN(v) ; + v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */ } - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("scanning '%c%c' len = %d\n",v[0],v[1],v[2])) ; + else { + v += 3 + VPD_GET_VPD_LEN(v); + } + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("scanning '%c%c' len = %d\n",v[0],v[1],v[2])); } #ifdef DEBUG - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL,("not found\n")) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, ("not found\n")); if (max == 0) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Key/Len Encoding error\n")) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL, + ("Key/Len Encoding error\n")); } -#endif - return (0) ; +#endif /* DEBUG */ + return(0); } /* @@ -643,24 +723,25 @@ char *end, /* end of memory block to move */ int n) /* number of bytes the memory block has to be moved */ { - char *p ; - int i ; /* number of byte copied */ + char *p; + int i; /* number of byte copied */ if (n == 0) - return ; + return; - i = (int) (end - start + 1) ; + i = (int) (end - start + 1); if (n < 0) { - p = start + n ; + p = start + n; while (i != 0) { - *p++ = *start++ ; - i-- ; + *p++ = *start++; + i--; } - } else { - p = end + n ; + } + else { + p = end + n; while (i != 0) { - *p-- = *end-- ; - i-- ; + *p-- = *end--; + i--; } } } @@ -676,13 +757,13 @@ int len, /* length of the value string */ char *ip) /* inseration point */ { - SK_VPD_KEY *p ; + SK_VPD_KEY *p; - p = (SK_VPD_KEY *) ip ; - p->p_key[0] = key[0] ; - p->p_key[1] = key[1] ; - p->p_len = (unsigned char) len ; - SK_MEMCPY(&p->p_val,buf,len) ; + p = (SK_VPD_KEY *) 
ip; + p->p_key[0] = key[0]; + p->p_key[1] = key[1]; + p->p_len = (unsigned char) len; + SK_MEMCPY(&p->p_val,buf,len); } /* @@ -696,46 +777,50 @@ SK_AC *pAC, /* common data base */ char *etp) /* end pointer input position */ { - SK_VPD_KEY *p ; - unsigned char x ; - int i ; + SK_VPD_KEY *p; + unsigned char x; + int i; + int vpd_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD modify endtag at 0x%x = '%c%c'\n",etp,etp[0],etp[1])); - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd modify endtag at 0x%x = '%c%c'\n",etp,etp[0],etp[1])) ; + vpd_size = pAC->vpd.vpd_size; - p = (SK_VPD_KEY *) etp ; + p = (SK_VPD_KEY *) etp; if (p->p_key[0] != 'R' || (p->p_key[1] != 'V' && p->p_key[1] != 'W')) { /* something wrong here, encoding error */ SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL, - ("Encoding Error: invalid end tag\n")) ; - return(1) ; + ("Encoding Error: invalid end tag\n")); + return(1); } - if (etp > pAC->vpd.vpd_buf + VPD_SIZE/2) { + if (etp > pAC->vpd.vpd_buf + vpd_size/2) { /* create "RW" tag */ - p->p_len = (unsigned char)(pAC->vpd.vpd_buf+VPD_SIZE-etp-3-1) ; - pAC->vpd.v.vpd_free_rw = (int) p->p_len ; - i = pAC->vpd.v.vpd_free_rw ; - etp += 3 ; - } else { + p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size-etp-3-1); + pAC->vpd.v.vpd_free_rw = (int) p->p_len; + i = pAC->vpd.v.vpd_free_rw; + etp += 3; + } + else { /* create "RV" tag */ - p->p_len = (unsigned char)(pAC->vpd.vpd_buf+VPD_SIZE/2-etp-3) ; - pAC->vpd.v.vpd_free_ro = (int) p->p_len - 1 ; + p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size/2-etp-3); + pAC->vpd.v.vpd_free_ro = (int) p->p_len - 1; /* setup checksum */ - for (i = 0, x = 0; i < VPD_SIZE/2 - p->p_len; i++) { - x += pAC->vpd.vpd_buf[i] ; + for (i = 0, x = 0; i < vpd_size/2 - p->p_len; i++) { + x += pAC->vpd.vpd_buf[i]; } - p->p_val = (char) 0 - x ; - i = pAC->vpd.v.vpd_free_ro ; - etp += 4 ; + p->p_val = (char) 0 - x; + i = pAC->vpd.v.vpd_free_ro; + etp += 4; } while (i) { - *etp++ = 0x00 ; - i-- ; + *etp++ = 0x00; + i--; } - return (0) ; + return(0); } /* @@ -760,76 +845,81 @@ int type, /* VPD_RO_KEY or VPD_RW_KEY */ int op) /* operation to do: ADD_KEY or OWR_KEY */ { - SK_VPD_PARA vp ; - char *etp ; /* end tag position */ - int free ; /* remaining space in selected area */ - char *ip ; /* input position inside the VPD buffer */ - int rtv ; /* return code */ - int head ; /* additional haeder bytes to move */ - int found ; /* additinoal bytes if the keyword was found */ + SK_VPD_PARA vp; + char *etp; /* end tag position */ + int free; /* remaining space in selected area */ + char *ip; /* input position inside the VPD buffer */ + int rtv; /* return code */ + int head; /* additional haeder bytes to move */ + int found; /* additinoal bytes if the keyword was found */ + int vpd_size; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("vpd setup para key = %s, val = %s\n",key,buf)) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("VPD setup para key = %s, val = %s\n",key,buf)); + + vpd_size = pAC->vpd.vpd_size; - rtv = 0 ; - ip = 0 ; + rtv = 0; + ip = 0; if (type == VPD_RW_KEY) { /* end tag is "RW" */ - free = pAC->vpd.v.vpd_free_rw ; - etp = pAC->vpd.vpd_buf + (VPD_SIZE - free - 1 - 3) ; - } else { + free = pAC->vpd.v.vpd_free_rw; + etp = pAC->vpd.vpd_buf + (vpd_size - free - 1 - 3); + } + else { /* end tag is "RV" */ - free = pAC->vpd.v.vpd_free_ro ; - etp = pAC->vpd.vpd_buf + (VPD_SIZE/2 - free - 4) ; + free = pAC->vpd.v.vpd_free_ro; + etp = pAC->vpd.vpd_buf + (vpd_size/2 - free - 4); } - 
SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, ("Free RO = %d, Free RW = %d\n", - pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw)) ; + pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw)); - head = 0 ; - found = 0 ; + head = 0; + found = 0; if (op == OWR_KEY) { - if (vpd_find_para(pAC,key,&vp)) { - found = 3 ; - ip = vp.p_val - 3 ; - free += vp.p_len + 3 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("Overwrite Key\n")) ; - } else { - op = ADD_KEY ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_CTRL, - ("Add Key\n")) ; + if (vpd_find_para(pAC, key, &vp)) { + found = 3; + ip = vp.p_val - 3; + free += vp.p_len + 3; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("Overwrite Key\n")); + } + else { + op = ADD_KEY; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, + ("Add Key\n")); } } if (op == ADD_KEY) { - ip = etp ; - vp.p_len = 0 ; - head = 3 ; + ip = etp; + vp.p_len = 0; + head = 3; } if (len + 3 > free) { if (free < 7) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("VPD Buffer Overflow, keyword not written\n")); - return (4) ; + return(4); } /* cut it again */ - len = free - 3 ; - rtv = 2 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("VPD Buffer Full, Keyword was cut\n")) ; + len = free - 3; + rtv = 2; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Buffer Full, Keyword was cut\n")); } - vpd_move_para(ip + vp.p_len + found, etp+2, len-vp.p_len+head) ; - vpd_insert_key(key, buf, len, ip) ; + vpd_move_para(ip + vp.p_len + found, etp+2, len-vp.p_len+head); + vpd_insert_key(key, buf, len, ip); if (vpd_mod_endtag(pAC, etp + len - vp.p_len + head)) { - pAC->vpd.v.vpd_status &= ~VPD_VALID ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("VPD Encoding Error\n")) ; - return(6) ; + pAC->vpd.v.vpd_status &= ~VPD_VALID; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Encoding Error\n")); + return(6); } - return (rtv) ; + return(rtv); } @@ -837,7 +927,7 @@ * Read the contents of the VPD EEPROM and copy it to the * VPD buffer if not already done. * - * return: A pointer to the vpd_status structure. The structure contain + * return: A pointer to the vpd_status structure. The structure contains * this fields. */ SK_VPD_STATUS *VpdStat( @@ -845,9 +935,9 @@ SK_IOC IoC) /* IO Context */ { if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { - (void)VpdInit(pAC,IoC) ; + (void)VpdInit(pAC,IoC); } - return(&pAC->vpd.v) ; + return(&pAC->vpd.v); } @@ -880,67 +970,69 @@ int *len, /* buffer length */ int *elements) /* number of keywords returned */ { - char *v ; - int n ; + char *v; + int n; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_RX,("list vpd keys .. ")) ; - *elements = 0 ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("list VPD keys .. 
")); + *elements = 0; if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { if (VpdInit(pAC,IoC) != 0 ) { - *len = 0 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("VPD Init Error, terminated\n")) ; - return(6) ; + *len = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD Init Error, terminated\n")); + return(6); } } if ((signed)strlen(VPD_NAME) + 1 <= *len) { - v = pAC->vpd.vpd_buf ; - strcpy(buf,VPD_NAME) ; - n = strlen(VPD_NAME) + 1 ; - buf += n ; - *elements = 1 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_RX, - ("'%c%c' ",v[0],v[1])) ; - } else { - *len = 0 ; + v = pAC->vpd.vpd_buf; + strcpy(buf,VPD_NAME); + n = strlen(VPD_NAME) + 1; + buf += n; + *elements = 1; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, + ("'%c%c' ",v[0],v[1])); + } + else { + *len = 0; SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("buffer overflow\n")) ; - return(2) ; + ("buffer overflow\n")); + return(2); } - v += 3 + VPD_GET_RES_LEN(v) + 3 ; - for ( ; ; ) { + v += 3 + VPD_GET_RES_LEN(v) + 3; + for (;; ) { /* exit when reaching the "RW" Tag */ if (SK_MEMCMP(VPD_RW,v,2) == 0) { - break ; + break; } if (SK_MEMCMP(VPD_RV,v,2) == 0) { - v += 3 + VPD_GET_VPD_LEN(v) + 3 ; /* skip VPD-W */ - continue ; + v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */ + continue; } if (n+3 <= *len) { - SK_MEMCPY(buf,v,2) ; - buf += 2 ; - *buf++ = '\0' ; - n += 3 ; - v += 3 + VPD_GET_VPD_LEN(v) ; - *elements += 1 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_RX, - ("'%c%c' ",v[0],v[1])) ; - } else { - *len = n ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("buffer overflow\n")) ; - return (2) ; + SK_MEMCPY(buf,v,2); + buf += 2; + *buf++ = '\0'; + n += 3; + v += 3 + VPD_GET_VPD_LEN(v); + *elements += 1; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, + ("'%c%c' ",v[0],v[1])); + } + else { + *len = n; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("buffer overflow\n")); + return(2); } } - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_RX,("\n")) ; - *len = n ; - return(0) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("\n")); + *len = n; + return(0); } @@ -964,34 +1056,35 @@ char *buf, /* buffer where to copy the keyword value */ int *len) /* buffer length */ { - SK_VPD_PARA *p, vp ; + SK_VPD_PARA *p, vp; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_RX,("vpd read %s .. ",key)) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("VPD read %s .. 
", key)); if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { if (VpdInit(pAC,IoC) != 0 ) { - *len = 0 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("vpd init error\n")) ; - return(6) ; + *len = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return(6); } } - if ((p = vpd_find_para(pAC,key,&vp))) { + if ((p = vpd_find_para(pAC, key, &vp)) != NULL) { if (p->p_len > (*(unsigned *)len)-1) { - p->p_len = *len - 1 ; + p->p_len = *len - 1; } - SK_MEMCPY(buf,p->p_val,p->p_len) ; - buf[p->p_len] = '\0' ; - *len = p->p_len ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_RX, + SK_MEMCPY(buf, p->p_val, p->p_len); + buf[p->p_len] = '\0'; + *len = p->p_len; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("%c%c%c%c.., len = %d\n", - buf[0],buf[1],buf[2],buf[3],*len)) ; - } else { - *len = 0 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR,("not found\n")) ; - return (1) ; + buf[0],buf[1],buf[2],buf[3],*len)); } - return (0) ; + else { + *len = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("not found\n")); + return(1); + } + return(0); } @@ -1009,9 +1102,9 @@ key[1] < '0' || key[1] > 'Z' || (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) { - return (SK_FALSE) ; + return(SK_FALSE); } - return (SK_TRUE) ; + return(SK_TRUE); } /* @@ -1033,46 +1126,46 @@ char *key, /* keyword to write (allowed values "Yx", "Vx") */ char *buf) /* buffer where the keyword value can be read from */ { - int len ; /* lenght of the keyword to write */ - int rtv ; /* return code */ - int rtv2 ; + int len; /* length of the keyword to write */ + int rtv; /* return code */ + int rtv2; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX, - ("vpd write %s = %s\n",key,buf)) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, + ("VPD write %s = %s\n",key,buf)); if ((*key != 'Y' && *key != 'V') || key[1] < '0' || key[1] > 'Z' || (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("illegal key tag, keyword not written\n")) ; - return (5) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("illegal key tag, keyword not written\n")); + return(5); } if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { if (VpdInit(pAC,IoC) != 0 ) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("vpd init error\n")) ; - return(6) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return(6); } } - rtv = 0 ; - len = strlen(buf) ; + rtv = 0; + len = strlen(buf); if (len > VPD_MAX_LEN) { /* cut it */ - len = VPD_MAX_LEN ; - rtv = 2 ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("keyword to long, cut after %d bytes\n",VPD_MAX_LEN)) ; - } - if ((rtv2 = VpdSetupPara(pAC,key,buf,len,VPD_RW_KEY,OWR_KEY)) != 0) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("vpd write error\n")) ; - return(rtv2) ; + len = VPD_MAX_LEN; + rtv = 2; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("keyword to long, cut after %d bytes\n",VPD_MAX_LEN)); + } + if ((rtv2 = VpdSetupPara(pAC, key,buf, len, VPD_RW_KEY, OWR_KEY)) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD write error\n")); + return(rtv2); } - return (rtv) ; + return(rtv); } /* @@ -1092,43 +1185,47 @@ SK_IOC IoC, /* IO Context */ char *key) /* keyword to read (e.g. 
"MN") */ { - SK_VPD_PARA *p, vp ; - char *etp ; + SK_VPD_PARA *p, vp; + char *etp; + int vpd_size; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("vpd delete key %s\n",key)) ; + vpd_size = pAC->vpd.vpd_size; + + SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("VPD delete key %s\n",key)); if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { if (VpdInit(pAC,IoC) != 0 ) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("vpd init error\n")) ; - return(6) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return(6); } } - if ((p = vpd_find_para(pAC,key,&vp))) { - if (p->p_val < pAC->vpd.vpd_buf + VPD_SIZE/2) { + if ((p = vpd_find_para(pAC, key, &vp)) != NULL) { + if (p->p_val < pAC->vpd.vpd_buf + vpd_size/2) { /* try to delete read only keyword */ - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("cannot delete RO keyword\n")) ; - return (5) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("cannot delete RO keyword\n")); + return(5); } - etp = pAC->vpd.vpd_buf + (VPD_SIZE-pAC->vpd.v.vpd_free_rw-1-3) ; + etp = pAC->vpd.vpd_buf + (vpd_size-pAC->vpd.v.vpd_free_rw-1-3); vpd_move_para(vp.p_val+vp.p_len, etp+2, - - ((int)(vp.p_len + 3))) ; + - ((int)(vp.p_len + 3))); if (vpd_mod_endtag(pAC, etp - vp.p_len - 3)) { - pAC->vpd.v.vpd_status &= ~VPD_VALID ; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("vpd encoding error\n")) ; - return(6) ; + pAC->vpd.v.vpd_status &= ~VPD_VALID; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD encoding error\n")); + return(6); } - } else { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("keyword not found\n")) ; - return (1) ; + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("keyword not found\n")); + return(1); } - return (0) ; + return(0); } /* @@ -1142,18 +1239,22 @@ SK_AC *pAC, /* Adapters context */ SK_IOC IoC) /* IO Context */ { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("vpd update .. ")) ; + int vpd_size; + + vpd_size = pAC->vpd.vpd_size; + + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("VPD update .. 
")); if (pAC->vpd.v.vpd_status & VPD_VALID) { - if (VpdTransferBlock(pAC,IoC,pAC->vpd.vpd_buf + VPD_SIZE/2, - VPD_SIZE/2, VPD_SIZE/2, VPD_WRITE) != VPD_SIZE/2) { + if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf + vpd_size/2, + vpd_size/2, vpd_size/2, VPD_WRITE) != vpd_size/2) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("transfer timed out\n")) ; - return(3) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("transfer timed out\n")); + return(3); } } - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("done\n")) ; - return (0) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("done\n")); + return(0); } @@ -1173,32 +1274,33 @@ SK_IOC IoC, /* IO Context */ char *msg) /* error log message */ { - SK_VPD_PARA *v, vf ; /* VF */ - int len ; + SK_VPD_PARA *v, vf; /* VF */ + int len; - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX, - ("vpd error log msg %s\n",msg)) ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, + ("VPD error log msg %s\n",msg)); if (!(pAC->vpd.v.vpd_status & VPD_VALID)) { if (VpdInit(pAC,IoC) != 0 ) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR, - ("vpd init error\n")) ; - return ; + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, + ("VPD init error\n")); + return; } } - len = strlen(msg) ; + len = strlen(msg); if (len > VPD_MAX_LEN) { /* cut it */ - len = VPD_MAX_LEN ; + len = VPD_MAX_LEN; + } + if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n")); + (void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY); } - if ((v = vpd_find_para(pAC,VPD_VF,&vf))) { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("overwrite VL\n")) ; - (void)VpdSetupPara(pAC,VPD_VL,msg,len,VPD_RW_KEY,OWR_KEY) ; - } else { - SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("write VF\n")) ; - (void)VpdSetupPara(pAC,VPD_VF,msg,len,VPD_RW_KEY,ADD_KEY) ; + else { + SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n")); + (void)VpdSetupPara(pAC, VPD_VF, msg,len, VPD_RW_KEY, ADD_KEY); } - (void)VpdUpdate(pAC,IoC) ; + (void)VpdUpdate(pAC, IoC); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/sk98lin/skxmac2.c linux.21pre4-ac2/drivers/net/sk98lin/skxmac2.c --- linux.21pre4/drivers/net/sk98lin/skxmac2.c 2003-01-29 16:26:44.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/sk98lin/skxmac2.c 2003-01-06 15:38:18.000000000 +0000 @@ -2,15 +2,15 @@ * * Name: skxmac2.c * Project: GEnesis, PCI Gigabit Ethernet Adapter - * Version: $Revision: 1.61 $ - * Date: $Date: 2001/02/09 15:40:59 $ - * Purpose: Contains functions to initialize the XMAC II + * Version: $Revision: 1.87 $ + * Date: $Date: 2002/12/10 14:39:05 $ + * Purpose: Contains functions to initialize the MACs and PHYs * ******************************************************************************/ /****************************************************************************** * - * (C)Copyright 1998-2001 SysKonnect GmbH. + * (C)Copyright 1998-2002 SysKonnect GmbH. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -26,6 +26,152 @@ * History: * * $Log: skxmac2.c,v $ + * Revision 1.87 2002/12/10 14:39:05 rschmidt + * Improved initialization of GPHY in SkGmInitPhyMarv(). + * Editorial changes. + * + * Revision 1.86 2002/12/09 15:01:12 rschmidt + * Added setup of Ext. PHY Specific Ctrl Reg (downshift feature). + * + * Revision 1.85 2002/12/05 14:09:16 rschmidt + * Improved avoiding endless loop in SkGmPhyWrite(), SkGmPhyWrite(). 
+ * Added additional advertising for 10Base-T when 100Base-T is selected. + * Added case SK_PHY_MARV_FIBER for YUKON Fiber adapter. + * Editorial changes. + * + * Revision 1.84 2002/11/15 12:50:09 rschmidt + * Changed SkGmCableDiagStatus() when getting results. + * + * Revision 1.83 2002/11/13 10:28:29 rschmidt + * Added some typecasts to avoid compiler warnings. + * + * Revision 1.82 2002/11/13 09:20:46 rschmidt + * Replaced for(..) with do {} while (...) in SkXmUpdateStats(). + * Replaced 2 macros GM_IN16() with 1 GM_IN32() in SkGmMacStatistic(). + * Added SkGmCableDiagStatus() for Virtual Cable Test (VCT). + * Editorial changes. + * + * Revision 1.81 2002/10/28 14:28:08 rschmidt + * Changed MAC address setup for GMAC in SkGmInitMac(). + * Optimized handling of counter overflow IRQ in SkGmOverflowStatus(). + * Editorial changes. + * + * Revision 1.80 2002/10/14 15:29:44 rschmidt + * Corrected disabling of all PHY IRQs. + * Added WA for deviation #16 (address used for pause packets). + * Set Pause Mode in SkMacRxTxEnable() only for Genesis. + * Added IRQ and counter for Receive FIFO Overflow in DEBUG-mode. + * SkXmTimeStamp() replaced by SkMacTimeStamp(). + * Added clearing of GMAC Tx FIFO Underrun IRQ in SkGmIrq(). + * Editorial changes. + * + * Revision 1.79 2002/10/10 15:55:36 mkarl + * changes for PLinkSpeedUsed + * + * Revision 1.78 2002/09/12 09:39:51 rwahl + * Removed deactivate code for SIRQ overflow event separate for TX/RX. + * + * Revision 1.77 2002/09/09 12:26:37 mkarl + * added handling for Yukon to SkXmTimeStamp + * + * Revision 1.76 2002/08/21 16:41:16 rschmidt + * Added bit GPC_ENA_XC (Enable MDI crossover) in HWCFG_MODE. + * Added forced speed settings in SkGmInitPhyMarv(). + * Added settings of full/half duplex capabilities for YUKON Fiber. + * Editorial changes. + * + * Revision 1.75 2002/08/16 15:12:01 rschmidt + * Replaced all if(GIChipId == CHIP_ID_GENESIS) with new entry GIGenesis. + * Added function SkMacHashing() for ADDR-Module. + * Removed functions SkXmClrSrcCheck(), SkXmClrHashAddr() (calls replaced + * with macros). + * Removed functions SkGmGetMuxConfig(). + * Added HWCFG_MODE init for YUKON Fiber. + * Changed initialization of GPHY in SkGmInitPhyMarv(). + * Changed check of parameter in SkXmMacStatistic(). + * Editorial changes. + * + * Revision 1.74 2002/08/12 14:00:17 rschmidt + * Replaced usage of Broadcom PHY Ids with defines. + * Corrected error messages in SkGmMacStatistic(). + * Made SkMacPromiscMode() public for ADDR-Modul. + * Editorial changes. + * + * Revision 1.73 2002/08/08 16:26:24 rschmidt + * Improved reset sequence for YUKON in SkGmHardRst() and SkGmInitMac(). + * Replaced XMAC Rx High Watermark init value with SK_XM_RX_HI_WM. + * Editorial changes. + * + * Revision 1.72 2002/07/24 15:11:19 rschmidt + * Fixed wrong placement of parenthesis. + * Editorial changes. + * + * Revision 1.71 2002/07/23 16:05:18 rschmidt + * Added global functions for PHY: SkGePhyRead(), SkGePhyWrite(). + * Fixed Tx Counter Overflow IRQ (Bug ID #10730). + * Editorial changes. + * + * Revision 1.70 2002/07/18 14:27:27 rwahl + * Fixed syntax error. + * + * Revision 1.69 2002/07/17 17:08:47 rwahl + * Fixed check in SkXmMacStatistic(). + * + * Revision 1.68 2002/07/16 07:35:24 rwahl + * Removed check for cleared mib counter in SkGmResetCounter(). 
+ * + * Revision 1.67 2002/07/15 18:35:56 rwahl + * Added SkXmUpdateStats(), SkGmUpdateStats(), SkXmMacStatistic(), + * SkGmMacStatistic(), SkXmResetCounter(), SkGmResetCounter(), + * SkXmOverflowStatus(), SkGmOverflowStatus(). + * Changes to SkXmIrq() & SkGmIrq(): Combined SIRQ Overflow for both + * RX & TX. + * Changes to SkGmInitMac(): call to SkGmResetCounter(). + * Editorial changes. + * + * Revision 1.66 2002/07/15 15:59:30 rschmidt + * Added PHY Address in SkXmPhyRead(), SkXmPhyWrite(). + * Added MIB Clear Counter in SkGmInitMac(). + * Added Duplex and Flow-Control settings. + * Reset all Multicast filtering Hash reg. in SkGmInitMac(). + * Added new function: SkGmGetMuxConfig(). + * Editorial changes. + * + * Revision 1.65 2002/06/10 09:35:39 rschmidt + * Replaced C++ comments (//). + * Added #define VCPU around VCPUwaitTime. + * Editorial changes. + * + * Revision 1.64 2002/06/05 08:41:10 rschmidt + * Added function for XMAC2: SkXmTimeStamp(). + * Added function for YUKON: SkGmSetRxCmd(). + * Changed SkGmInitMac() resp. SkGmHardRst(). + * Fixed wrong variable in SkXmAutoNegLipaXmac() (debug mode). + * SkXmRxTxEnable() replaced by SkMacRxTxEnable(). + * Editorial changes. + * + * Revision 1.63 2002/04/25 13:04:44 rschmidt + * Changes for handling YUKON. + * Use of #ifdef OTHER_PHY to eliminate code for unused Phy types. + * Macros for XMAC PHY access PHY_READ(), PHY_WRITE() replaced + * by functions SkXmPhyRead(), SkXmPhyWrite(); + * Removed use of PRxCmd to setup XMAC. + * Added define PHY_B_AS_PAUSE_MSK for BCom Pause Res. + * Added setting of XM_RX_DIS_CEXT in SkXmInitMac(). + * Removed status parameter from MAC IRQ handler SkMacIrq(), + * SkXmIrq() and SkGmIrq(). + * SkXmAutoNegLipa...() for ext. Phy replaced by SkMacAutoNegLipaPhy(). + * Added SkMac...() functions to handle both XMAC and GMAC. + * Added functions for YUKON: SkGmHardRst(), SkGmSoftRst(), + * SkGmSetRxTxEn(), SkGmIrq(), SkGmInitMac(), SkGmInitPhyMarv(), + * SkGmAutoNegDoneMarv(), SkGmPhyRead(), SkGmPhyWrite(). + * Changes for V-CPU support. + * Editorial changes. + * + * Revision 1.62 2001/08/06 09:50:14 rschmidt + * Workaround BCOM Errata #1 for the C5 type. + * Editorial changes. + * * Revision 1.61 2001/02/09 15:40:59 rassmann * Editorial changes. * @@ -194,7 +340,7 @@ * * Revision 1.12 1998/10/14 14:45:04 malthoff * Remove SKERR_SIRQ_E0xx and SKERR_SIRQ_E0xxMSG by - * SKERR_HWI_Exx and SKERR_HWI_E0xxMSG to be independant + * SKERR_HWI_Exx and SKERR_HWI_E0xxMSG to be independent * from the Sirq module. * * Revision 1.11 1998/10/14 13:59:01 gklug @@ -203,17 +349,16 @@ * Revision 1.10 1998/10/14 11:20:57 malthoff * Make SkXmAutoNegDone() public, because it's * used in diagnostics, too. - * The Link Up event to the RLMT is issued in - * SkXmIrq(). SkXmIrq() is not available in - * diagnostics. Use PHY_READ when reading - * PHY registers. + * The Link Up event to the RLMT is issued in SkXmIrq(). + * SkXmIrq() is not available in diagnostics. + * Use PHY_READ when reading PHY registers. * * Revision 1.9 1998/10/14 05:50:10 cgoos * Added definition for Para. 
* * Revision 1.8 1998/10/14 05:41:28 gklug * add: Xmac IRQ - * add: auto negotiation done function + * add: auto-negotiation done function * * Revision 1.7 1998/10/09 06:55:20 malthoff * The configuration of the XMACs Tx Request Threshold @@ -246,17 +391,9 @@ ******************************************************************************/ #include "h/skdrv1st.h" -#include "h/xmac_ii.h" #include "h/skdrv2nd.h" -/* defines ********************************************************************/ /* typedefs *******************************************************************/ -/* global variables ***********************************************************/ - -/* local variables ************************************************************/ - -static const char SysKonnectFileId[] = - "@(#)$Id: skxmac2.c,v 1.61 2001/02/09 15:40:59 rassmann Exp $ (C) SK "; /* BCOM PHY magic pattern list */ typedef struct s_PhyHack { @@ -264,6 +401,10 @@ SK_U16 PhyVal; /* Value to write */ } BCOM_HACK; +/* local variables ************************************************************/ +static const char SysKonnectFileId[] = + "@(#)$Id: skxmac2.c,v 1.87 2002/12/10 14:39:05 rschmidt Exp $ (C) SK "; + BCOM_HACK BcomRegA1Hack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, @@ -279,308 +420,797 @@ /* function prototypes ********************************************************/ static void SkXmInitPhyXmac(SK_AC*, SK_IOC, int, SK_BOOL); static void SkXmInitPhyBcom(SK_AC*, SK_IOC, int, SK_BOOL); -static void SkXmInitPhyLone(SK_AC*, SK_IOC, int, SK_BOOL); -static void SkXmInitPhyNat (SK_AC*, SK_IOC, int, SK_BOOL); +static void SkGmInitPhyMarv(SK_AC*, SK_IOC, int, SK_BOOL); static int SkXmAutoNegDoneXmac(SK_AC*, SK_IOC, int); static int SkXmAutoNegDoneBcom(SK_AC*, SK_IOC, int); +static int SkGmAutoNegDoneMarv(SK_AC*, SK_IOC, int); +#ifdef OTHER_PHY +static void SkXmInitPhyLone(SK_AC*, SK_IOC, int, SK_BOOL); +static void SkXmInitPhyNat (SK_AC*, SK_IOC, int, SK_BOOL); static int SkXmAutoNegDoneLone(SK_AC*, SK_IOC, int); static int SkXmAutoNegDoneNat (SK_AC*, SK_IOC, int); +#endif /* OTHER_PHY */ + + /****************************************************************************** * - * SkXmSetRxCmd() - Modify the value of the XMACs Rx Command Register + * SkXmPhyRead() - Read from XMAC PHY register * - * Description: - * The features - * o FCS stripping, SK_STRIP_FCS_ON/OFF - * o pad byte stripping, SK_STRIP_PAD_ON/OFF - * o don't set XMR_FS_ERR in frame SK_LENERR_OK_ON/OFF - * status for inrange length error - * frames, and - * o don't set XMR_FS_ERR in frame SK_BIG_PK_OK_ON/OFF - * status for frames > 1514 bytes - * - * for incomming packets may be enabled/disabled by this function. - * Additional modes may be added later. - * Multiple modes can be enabled/disabled at the same time. - * The new configuration is stored into the HWAC port configuration - * and is written to the Receive Command register immediatlely. - * The new configuration is saved over any SkGePortStop() and - * SkGeInitPort() calls. The configured value will be overwritten - * when SkGeInit(Level 0) is executed. + * Description: reads a 16-bit word from XMAC PHY or ext. 
PHY * * Returns: * nothing */ -void SkXmSetRxCmd( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port, /* The XMAC to handle with belongs to this Port */ -int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF, - SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */ +void SkXmPhyRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 *pVal) /* Pointer to Value */ { + SK_U16 Mmu; SK_GEPORT *pPrt; - SK_U16 OldRxMode; pPrt = &pAC->GIni.GP[Port]; - OldRxMode = pPrt->PRxCmd; + + /* write the PHY register's address */ + XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr); + + /* get the PHY register's value */ + XM_IN16(IoC, Port, XM_PHY_DATA, pVal); + + if (pPrt->PhyType != SK_PHY_XMAC) { + do { + XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu); + /* wait until 'Ready' is set */ + } while ((Mmu & XM_MMU_PHY_RDY) == 0); - switch(Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) { - case SK_STRIP_FCS_ON: - pPrt->PRxCmd |= XM_RX_STRIP_FCS; - break; - case SK_STRIP_FCS_OFF: - pPrt->PRxCmd &= ~XM_RX_STRIP_FCS; - break; + /* get the PHY register's value */ + XM_IN16(IoC, Port, XM_PHY_DATA, pVal); } +} /* SkXmPhyRead */ - switch(Mode & (SK_STRIP_PAD_ON | SK_STRIP_PAD_OFF)) { - case SK_STRIP_PAD_ON: - pPrt->PRxCmd |= XM_RX_STRIP_PAD; - break; - case SK_STRIP_PAD_OFF: - pPrt->PRxCmd &= ~XM_RX_STRIP_PAD; - break; - } - switch(Mode & (SK_LENERR_OK_ON | SK_LENERR_OK_OFF)) { - case SK_LENERR_OK_ON: - pPrt->PRxCmd |= XM_RX_LENERR_OK; - break; - case SK_LENERR_OK_OFF: - pPrt->PRxCmd &= ~XM_RX_LENERR_OK; - break; - } +/****************************************************************************** + * + * SkXmPhyWrite() - Write to XMAC PHY register + * + * Description: writes a 16-bit word to XMAC PHY or ext. PHY + * + * Returns: + * nothing + */ +void SkXmPhyWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 Val) /* Value */ +{ + SK_U16 Mmu; + SK_GEPORT *pPrt; - switch(Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) { - case SK_BIG_PK_OK_ON: - pPrt->PRxCmd |= XM_RX_BIG_PK_OK; - break; - case SK_BIG_PK_OK_OFF: - pPrt->PRxCmd &= ~XM_RX_BIG_PK_OK; - break; + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PhyType != SK_PHY_XMAC) { + do { + XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu); + /* wait until 'Busy' is cleared */ + } while ((Mmu & XM_MMU_PHY_BUSY) != 0); } - - /* Write the new mode to the receive command register if required */ - if (OldRxMode != pPrt->PRxCmd) { - XM_OUT16(IoC, Port, XM_RX_CMD, pPrt->PRxCmd); + + /* write the PHY register's address */ + XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr); + + /* write the PHY register's value */ + XM_OUT16(IoC, Port, XM_PHY_DATA, Val); + + if (pPrt->PhyType != SK_PHY_XMAC) { + do { + XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu); + /* wait until 'Busy' is cleared */ + } while ((Mmu & XM_MMU_PHY_BUSY) != 0); } -} /* SkXmSetRxCmd*/ +} /* SkXmPhyWrite */ /****************************************************************************** * - * SkXmClrExactAddr() - Clear Exact Match Address Registers + * SkGmPhyRead() - Read from GPHY register * - * Description: - * All Exact Match Address registers of the XMAC 'Port' will be - * cleared starting with 'StartNum' up to (and including) the - * Exact Match address number of 'StopNum'. 
+ * Description: reads a 16-bit word from GPHY through MDIO * * Returns: * nothing */ -void SkXmClrExactAddr( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port, /* The XMAC to handle with belongs to this Port */ -int StartNum, /* Begin with this Address Register Index (0..15) */ -int StopNum) /* Stop after finished with this Register Idx (0..15) */ +void SkGmPhyRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 *pVal) /* Pointer to Value */ { - int i; - SK_U16 ZeroAddr[3] = {0x0000, 0x0000, 0x0000}; + SK_U16 Ctrl; + SK_GEPORT *pPrt; +#ifdef VCPU + u_long SimCyle; + u_long SimLowTime; + + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "SkGmPhyRead(%u), SimCyle=%u, SimLowTime=%u\n", + PhyReg, SimCyle, SimLowTime); +#endif /* VCPU */ + + pPrt = &pAC->GIni.GP[Port]; + + /* set PHY-Register offset and 'Read' OpCode (= 1) */ + *pVal = GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | GM_SMI_CT_REG_AD(PhyReg) | + GM_SMI_CT_OP_RD; - if ((unsigned)StartNum > 15 || (unsigned)StopNum > 15 || - StartNum > StopNum) { + GM_OUT16(IoC, Port, GM_SMI_CTRL, *pVal); - SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E001, SKERR_HWI_E001MSG); + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* additional check for MDC/MDIO activity */ + if ((Ctrl & GM_SMI_CT_BUSY) == 0) { + *pVal = 0; return; } - for (i = StartNum; i <= StopNum; i++) { - XM_OUTADDR(IoC, Port, XM_EXM(i), &ZeroAddr[0]); + *pVal |= GM_SMI_CT_BUSY; + + do { +#ifdef VCPU + VCPUwaitTime(1000); +#endif /* VCPU */ + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* wait until 'ReadValid' is set */ + } while (Ctrl == *pVal); + + /* get the PHY register's value */ + GM_IN16(IoC, Port, GM_SMI_DATA, pVal); + +#ifdef VCPU + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n", + SimCyle, SimLowTime); +#endif /* VCPU */ +} /* SkGmPhyRead */ + + +/****************************************************************************** + * + * SkGmPhyWrite() - Write to GPHY register + * + * Description: writes a 16-bit word to GPHY through MDIO + * + * Returns: + * nothing + */ +void SkGmPhyWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 Val) /* Value */ +{ + SK_U16 Ctrl; + SK_GEPORT *pPrt; +#ifdef VCPU + SK_U32 DWord; + u_long SimCyle; + u_long SimLowTime; + + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "SkGmPhyWrite(Reg=%u, Val=0x%04x), SimCyle=%u, SimLowTime=%u\n", + PhyReg, Val, SimCyle, SimLowTime); +#endif /* VCPU */ + + pPrt = &pAC->GIni.GP[Port]; + + /* write the PHY register's value */ + GM_OUT16(IoC, Port, GM_SMI_DATA, Val); + + /* set PHY-Register offset and 'Write' OpCode (= 0) */ + Val = GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | GM_SMI_CT_REG_AD(PhyReg); + + GM_OUT16(IoC, Port, GM_SMI_CTRL, Val); + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* additional check for MDC/MDIO activity */ + if ((Ctrl & GM_SMI_CT_BUSY) == 0) { + return; } -} /* SkXmClrExactAddr */ + + Val |= GM_SMI_CT_BUSY; + + do { +#ifdef VCPU + /* read Timer value */ + SK_IN32(IoC, B2_TI_VAL, &DWord); + + VCPUwaitTime(1000); +#endif /* VCPU */ + + GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl); + + /* wait until 'Busy' is cleared */ + } while (Ctrl == Val); + +#ifdef VCPU + VCPUgetTime(&SimCyle, &SimLowTime); + VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n", + SimCyle, SimLowTime); +#endif /* VCPU */ +} /* 
SkGmPhyWrite */ /****************************************************************************** * - * SkXmClrSrcCheck() - Clear Source Check Address Register + * SkGePhyRead() - Read from PHY register * - * Description: - * The Source Check Address Register of the XMAC 'Port' number - * will be cleared. + * Description: calls a read PHY routine dep. on board type * * Returns: * nothing */ -static void SkXmClrSrcCheck( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port) /* The XMAC to handle with belongs to this Port (MAC_1 + n) */ +void SkGePhyRead( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 *pVal) /* Pointer to Value */ { - SK_U16 ZeroAddr[3] = {0x0000, 0x0000, 0x0000}; + void (*r_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 *pVal); - XM_OUTHASH(IoC, Port, XM_SRC_CHK, &ZeroAddr); -} /* SkXmClrSrcCheck */ + if (pAC->GIni.GIGenesis) { + r_func = SkXmPhyRead; + } + else { + r_func = SkGmPhyRead; + } + + r_func(pAC, IoC, Port, PhyReg, pVal); +} /* SkGePhyRead */ /****************************************************************************** * - * SkXmClrHashAddr() - Clear Hash Address Registers + * SkGePhyWrite() - Write to PHY register * - * Description: - * The Hash Address Register of the XMAC 'Port' will be cleared. + * Description: calls a write PHY routine dep. on board type * * Returns: * nothing */ -static void SkXmClrHashAddr( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port) /* The XMAC to handle with belongs to this Port (MAC_1 + n) */ +void SkGePhyWrite( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* I/O Context */ +int Port, /* Port Index (MAC_1 + n) */ +int PhyReg, /* Register Address (Offset) */ +SK_U16 Val) /* Value */ { - SK_U16 ZeroAddr[4] = {0x0000, 0x0000, 0x0000, 0x0000}; + void (*w_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 Val); - XM_OUTHASH(IoC, Port, XM_HSM, &ZeroAddr); -} /* SkXmClrHashAddr*/ + if (pAC->GIni.GIGenesis) { + w_func = SkXmPhyWrite; + } + else { + w_func = SkGmPhyWrite; + } + + w_func(pAC, IoC, Port, PhyReg, Val); +} /* SkGePhyWrite */ /****************************************************************************** * - * SkXmFlushTxFifo() - Flush the XMACs transmit FIFO + * SkMacPromiscMode() - Enable / Disable Promiscuous Mode * * Description: - * Flush the transmit FIFO of the XMAC specified by the index 'Port' + * enables / disables promiscuous mode by setting Mode Register (XMAC) or + * Receive Control Register (GMAC) dep. 
on board type * * Returns: * nothing */ -void SkXmFlushTxFifo( +void SkMacPromiscMode( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* The XMAC to handle with belongs to this Port (MAC_1 + n) */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ { + SK_U16 RcReg; SK_U32 MdReg; - XM_IN32(IoC, Port, XM_MODE, &MdReg); - MdReg |= XM_MD_FTF; - XM_OUT32(IoC, Port, XM_MODE, MdReg); -} /* SkXmFlushTxFifo */ + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + /* enable or disable promiscuous mode */ + if (Enable) { + MdReg |= XM_MD_ENA_PROM; + } + else { + MdReg &= ~XM_MD_ENA_PROM; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + } + else { + + GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg); + + /* enable or disable unicast and multicast filtering */ + if (Enable) { + RcReg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + } + else { + RcReg |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + } + /* setup Receive Control Register */ + GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg); + } +} /* SkMacPromiscMode*/ /****************************************************************************** * - * SkXmFlushRxFifo() - Flush the XMACs receive FIFO + * SkMacHashing() - Enable / Disable Hashing * * Description: - * Flush the receive FIFO of the XMAC specified by the index 'Port' + * enables / disables hashing by setting Mode Register (XMAC) or + * Receive Control Register (GMAC) dep. on board type * * Returns: * nothing */ -void SkXmFlushRxFifo( +void SkMacHashing( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* The XMAC to handle with belongs to this Port (MAC_1 + n) */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ { + SK_U16 RcReg; SK_U32 MdReg; - XM_IN32(IoC, Port, XM_MODE, &MdReg); - MdReg |= XM_MD_FRF; - XM_OUT32(IoC, Port, XM_MODE, MdReg); -} /* SkXmFlushRxFifo*/ + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + /* enable or disable hashing */ + if (Enable) { + MdReg |= XM_MD_ENA_HASH; + } + else { + MdReg &= ~XM_MD_ENA_HASH; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + } + else { + + GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg); + + /* enable or disable multicast filtering */ + if (Enable) { + RcReg |= GM_RXCR_MCF_ENA; + } + else { + RcReg &= ~GM_RXCR_MCF_ENA; + } + /* setup Receive Control Register */ + GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg); + } +} /* SkMacHashing*/ +#ifdef SK_DIAG /****************************************************************************** * - * SkXmSoftRst() - Do a XMAC software reset + * SkXmSetRxCmd() - Modify the value of the XMAC's Rx Command Register * * Description: - * The PHY registers should not be destroyed during this - * kind of software reset. Therefore the XMAC Software Reset - * (XM_GP_RES_MAC bit in XM_GP_PORT) must not be used! - * - * The software reset is done by - * - disabling the Rx and Tx state maschine, - * - reseting the statistics module, - * - clear all other significant XMAC Mode, - * Command, and Control Registers - * - clearing the Hash Register and the - * Exact Match Address registers, and - * - flushing the XMAC's Rx and Tx FIFOs. - * - * Note: - * Another requirement when stopping the XMAC is to - * avoid sending corrupted frames on the network. - * Disabling the Tx state maschine will NOT interrupt - * the currently transmitted frame. But we must take care - * that the tx FIFO is cleared AFTER the current frame - * is complete sent to the network. 
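SkMacPromiscMode() and SkMacHashing() above share one shape: pick the register by MAC family, read it, flip the relevant bits, and write it back, with the twist that the GMAC becomes promiscuous by clearing its unicast/multicast filter enables rather than by setting a dedicated bit. A minimal standalone sketch of that read-modify-write pattern follows; the register model and bit values are illustrative stand-ins, not the driver's definitions.

/* illustrative stand-ins only, not the sk98lin register map */
#include <stdint.h>
#include <stdio.h>

#define FAKE_XM_MD_ENA_PROM   0x00000004u   /* XMAC: promiscuous enable bit */
#define FAKE_GM_RXCR_UCF_ENA  0x8000u       /* GMAC: unicast filter enable */
#define FAKE_GM_RXCR_MCF_ENA  0x4000u       /* GMAC: multicast filter enable */

static uint32_t xm_mode;        /* simulated XMAC Mode register */
static uint16_t gm_rx_ctrl;     /* simulated GMAC Receive Control register */

/* same shape as SkMacPromiscMode(): read-modify-write, branch on MAC family;
 * the GMAC side is inverted, promiscuous means "address filters off" */
static void mac_set_promisc(int is_genesis, int enable)
{
	if (is_genesis) {
		if (enable)
			xm_mode |= FAKE_XM_MD_ENA_PROM;
		else
			xm_mode &= ~FAKE_XM_MD_ENA_PROM;
	}
	else {
		if (enable)
			gm_rx_ctrl &= (uint16_t)~(FAKE_GM_RXCR_UCF_ENA | FAKE_GM_RXCR_MCF_ENA);
		else
			gm_rx_ctrl |= (FAKE_GM_RXCR_UCF_ENA | FAKE_GM_RXCR_MCF_ENA);
	}
}

int main(void)
{
	gm_rx_ctrl = FAKE_GM_RXCR_UCF_ENA | FAKE_GM_RXCR_MCF_ENA;
	mac_set_promisc(0, 1);
	printf("GMAC Rx control with promiscuous mode on: 0x%04x\n",
	       (unsigned)gm_rx_ctrl);
	return 0;
}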
+ * The features + * - FCS stripping, SK_STRIP_FCS_ON/OFF + * - pad byte stripping, SK_STRIP_PAD_ON/OFF + * - don't set XMR_FS_ERR in status SK_LENERR_OK_ON/OFF + * for inrange length error frames + * - don't set XMR_FS_ERR in status SK_BIG_PK_OK_ON/OFF + * for frames > 1514 bytes + * - enable Rx of own packets SK_SELF_RX_ON/OFF * - * It takes about 12ns to send a frame with 1538 bytes. - * One PCI clock goes at least 15ns (66MHz). Therefore - * after reading XM_GP_PORT back, we are sure that the - * transmitter is disabled AND idle. And this means - * we may flush the transmit FIFO now. + * for incoming packets may be enabled/disabled by this function. + * Additional modes may be added later. + * Multiple modes can be enabled/disabled at the same time. + * The new configuration is written to the Rx Command register immediately. * * Returns: * nothing */ -void SkXmSoftRst( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port) /* port to stop (MAC_1 + n) */ +static void SkXmSetRxCmd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF, + SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */ { - SK_GEPORT *pPrt; - SK_U16 Word; + SK_U16 OldRxCmd; + SK_U16 RxCmd; - pPrt = &pAC->GIni.GP[Port]; - - /* disable the receiver and transmitter */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Word); - XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX|XM_MMU_ENA_TX)); - - /* reset the statistics module */ - XM_OUT32(IoC, Port, XM_GP_PORT, XM_GP_RES_STAT); + XM_IN16(IoC, Port, XM_RX_CMD, &OldRxCmd); - /* - * clear all other significant XMAC Mode, - * Command, and Control Registers - */ - XM_OUT16(IoC, Port, XM_IMSK, 0xffff); /* disable all IRQs */ - XM_OUT32(IoC, Port, XM_MODE, 0x00000000); /* clear Mode Reg */ - XM_OUT16(IoC, Port, XM_TX_CMD, 0x0000); /* reset TX CMD Reg */ - XM_OUT16(IoC, Port, XM_RX_CMD, 0x0000); /* reset RX CMD Reg */ + RxCmd = OldRxCmd; + + switch (Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) { + case SK_STRIP_FCS_ON: + RxCmd |= XM_RX_STRIP_FCS; + break; + case SK_STRIP_FCS_OFF: + RxCmd &= ~XM_RX_STRIP_FCS; + break; + } + + switch (Mode & (SK_STRIP_PAD_ON | SK_STRIP_PAD_OFF)) { + case SK_STRIP_PAD_ON: + RxCmd |= XM_RX_STRIP_PAD; + break; + case SK_STRIP_PAD_OFF: + RxCmd &= ~XM_RX_STRIP_PAD; + break; + } + + switch (Mode & (SK_LENERR_OK_ON | SK_LENERR_OK_OFF)) { + case SK_LENERR_OK_ON: + RxCmd |= XM_RX_LENERR_OK; + break; + case SK_LENERR_OK_OFF: + RxCmd &= ~XM_RX_LENERR_OK; + break; + } + + switch (Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) { + case SK_BIG_PK_OK_ON: + RxCmd |= XM_RX_BIG_PK_OK; + break; + case SK_BIG_PK_OK_OFF: + RxCmd &= ~XM_RX_BIG_PK_OK; + break; + } + + switch (Mode & (SK_SELF_RX_ON | SK_SELF_RX_OFF)) { + case SK_SELF_RX_ON: + RxCmd |= XM_RX_SELF_RX; + break; + case SK_SELF_RX_OFF: + RxCmd &= ~XM_RX_SELF_RX; + break; + } + + /* Write the new mode to the Rx command register if required */ + if (OldRxCmd != RxCmd) { + XM_OUT16(IoC, Port, XM_RX_CMD, RxCmd); + } +} /* SkXmSetRxCmd */ + + +/****************************************************************************** + * + * SkGmSetRxCmd() - Modify the value of the GMAC's Rx Control Register + * + * Description: + * The features + * - FCS (CRC) stripping, SK_STRIP_FCS_ON/OFF + * - don't set XMR_FS_ERR in status SK_BIG_PK_OK_ON/OFF + * for frames > 1514 bytes + * - enable Rx of own packets SK_SELF_RX_ON/OFF + * + * for incoming packets may be enabled/disabled by this function. 
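The Mode word described here carries a pair of request bits per feature (an _ON and an _OFF flag), so several features can be switched in one call while features whose bits are absent keep their current setting, and the register is rewritten only when the computed value actually changes. A small self-contained sketch of that decode, using illustrative flag and bit values rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define REQ_STRIP_FCS_ON   0x01   /* request flags: one ON/OFF pair per feature */
#define REQ_STRIP_FCS_OFF  0x02
#define REQ_BIG_PK_ON      0x04
#define REQ_BIG_PK_OFF     0x08

#define HW_STRIP_FCS       0x0100u   /* stand-in for a bit like XM_RX_STRIP_FCS */
#define HW_BIG_PK_OK       0x0200u   /* stand-in for a bit like XM_RX_BIG_PK_OK */

static uint16_t rx_cmd;   /* simulated Rx Command register */

static void set_rx_cmd(int mode)
{
	uint16_t new_cmd = rx_cmd;

	/* each feature is requested with either its ON or its OFF bit;
	 * if neither is present the current setting is kept */
	switch (mode & (REQ_STRIP_FCS_ON | REQ_STRIP_FCS_OFF)) {
	case REQ_STRIP_FCS_ON:
		new_cmd |= HW_STRIP_FCS;
		break;
	case REQ_STRIP_FCS_OFF:
		new_cmd &= (uint16_t)~HW_STRIP_FCS;
		break;
	}

	switch (mode & (REQ_BIG_PK_ON | REQ_BIG_PK_OFF)) {
	case REQ_BIG_PK_ON:
		new_cmd |= HW_BIG_PK_OK;
		break;
	case REQ_BIG_PK_OFF:
		new_cmd &= (uint16_t)~HW_BIG_PK_OK;
		break;
	}

	/* touch the (slow) register only if something actually changed */
	if (new_cmd != rx_cmd)
		rx_cmd = new_cmd;
}

int main(void)
{
	set_rx_cmd(REQ_STRIP_FCS_ON | REQ_BIG_PK_OFF);
	printf("Rx command: 0x%04x\n", (unsigned)rx_cmd);
	return 0;
}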
+ * Additional modes may be added later. + * Multiple modes can be enabled/disabled at the same time. + * The new configuration is written to the Rx Command register immediately. + * + * Returns: + * nothing + */ +static void SkGmSetRxCmd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF, + SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */ +{ + SK_U16 OldRxCmd; + SK_U16 RxCmd; + + if ((Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) != 0) { + + GM_IN16(IoC, Port, GM_RX_CTRL, &OldRxCmd); + + RxCmd = OldRxCmd; + + if ((Mode & SK_STRIP_FCS_ON) != 0) { + RxCmd |= GM_RXCR_CRC_DIS; + } + else { + RxCmd &= ~GM_RXCR_CRC_DIS; + } + /* Write the new mode to the Rx control register if required */ + if (OldRxCmd != RxCmd) { + GM_OUT16(IoC, Port, GM_RX_CTRL, RxCmd); + } + } + + if ((Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) != 0) { + + GM_IN16(IoC, Port, GM_SERIAL_MODE, &OldRxCmd); + + RxCmd = OldRxCmd; + + if ((Mode & SK_BIG_PK_OK_ON) != 0) { + RxCmd |= GM_SMOD_JUMBO_ENA; + } + else { + RxCmd &= ~GM_SMOD_JUMBO_ENA; + } + /* Write the new mode to the Rx control register if required */ + if (OldRxCmd != RxCmd) { + GM_OUT16(IoC, Port, GM_SERIAL_MODE, RxCmd); + } + } +} /* SkGmSetRxCmd */ + + +/****************************************************************************** + * + * SkMacSetRxCmd() - Modify the value of the MAC's Rx Control Register + * + * Description: modifies the MAC's Rx Control reg. dep. on board type + * + * Returns: + * nothing + */ +void SkMacSetRxCmd( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Mode) /* Rx Mode */ +{ + if (pAC->GIni.GIGenesis) { + + SkXmSetRxCmd(pAC, IoC, Port, Mode); + } + else { + + SkGmSetRxCmd(pAC, IoC, Port, Mode); + } +} /* SkMacSetRxCmd */ + + +/****************************************************************************** + * + * SkMacCrcGener() - Enable / Disable CRC Generation + * + * Description: enables / disables CRC generation dep. on board type + * + * Returns: + * nothing + */ +void SkMacCrcGener( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ + SK_U16 Word; + + if (pAC->GIni.GIGenesis) { + + XM_IN16(IoC, Port, XM_TX_CMD, &Word); + + if (Enable) { + Word &= ~XM_TX_NO_CRC; + } + else { + Word |= XM_TX_NO_CRC; + } + /* setup Tx Command Register */ + XM_OUT16(pAC, Port, XM_TX_CMD, Word); + } + else { + + GM_IN16(IoC, Port, GM_TX_CTRL, &Word); + + if (Enable) { + Word &= ~GM_TXCR_CRC_DIS; + } + else { + Word |= GM_TXCR_CRC_DIS; + } + /* setup Tx Control Register */ + GM_OUT16(IoC, Port, GM_TX_CTRL, Word); + } +} /* SkMacCrcGener*/ + +#endif /* SK_DIAG */ + + +/****************************************************************************** + * + * SkXmClrExactAddr() - Clear Exact Match Address Registers + * + * Description: + * All Exact Match Address registers of the XMAC 'Port' will be + * cleared starting with 'StartNum' up to (and including) the + * Exact Match address number of 'StopNum'. 
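A compact way to read the range check in the function body just below: the (unsigned) casts fold the "negative index" and "index above 15" tests into a single comparison each, and start > stop rejects inverted ranges before the loop runs. A standalone sketch under those assumptions; the 16-entry table and the error path are stand-ins for the XM_EXM() registers and SK_ERR_LOG():

#include <stdio.h>
#include <string.h>

#define NUM_EXACT_ADDR 16

static unsigned short exact_addr[NUM_EXACT_ADDR][3];   /* 48-bit entries */

static int clr_exact_addr(int start, int stop)
{
	int i;

	/* the (unsigned) casts reject negative indices and values above 15
	 * with one comparison each, as the driver's check does */
	if ((unsigned)start > 15 || (unsigned)stop > 15 || start > stop) {
		fprintf(stderr, "clr_exact_addr: bad range %d..%d\n", start, stop);
		return -1;
	}

	for (i = start; i <= stop; i++)
		memset(exact_addr[i], 0, sizeof(exact_addr[i]));

	return 0;
}

int main(void)
{
	return clr_exact_addr(0, 15);
}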
+ * + * Returns: + * nothing + */ +void SkXmClrExactAddr( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int StartNum, /* Begin with this Address Register Index (0..15) */ +int StopNum) /* Stop after finished with this Register Idx (0..15) */ +{ + int i; + SK_U16 ZeroAddr[3] = {0x0000, 0x0000, 0x0000}; + + if ((unsigned)StartNum > 15 || (unsigned)StopNum > 15 || + StartNum > StopNum) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E001, SKERR_HWI_E001MSG); + return; + } + + for (i = StartNum; i <= StopNum; i++) { + XM_OUTADDR(IoC, Port, XM_EXM(i), &ZeroAddr[0]); + } +} /* SkXmClrExactAddr */ + + +/****************************************************************************** + * + * SkMacFlushTxFifo() - Flush the MAC's transmit FIFO + * + * Description: + * Flush the transmit FIFO of the MAC specified by the index 'Port' + * + * Returns: + * nothing + */ +void SkMacFlushTxFifo( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U32 MdReg; + + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FTF); + } + else { + /* no way to flush the FIFO we have to issue a reset */ + /* TBD */ + } +} /* SkMacFlushTxFifo */ + + +/****************************************************************************** + * + * SkMacFlushRxFifo() - Flush the MAC's receive FIFO + * + * Description: + * Flush the receive FIFO of the MAC specified by the index 'Port' + * + * Returns: + * nothing + */ +void SkMacFlushRxFifo( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U32 MdReg; + + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FRF); + } + else { + /* no way to flush the FIFO we have to issue a reset */ + /* TBD */ + } +} /* SkMacFlushRxFifo */ + + +/****************************************************************************** + * + * SkXmSoftRst() - Do a XMAC software reset + * + * Description: + * The PHY registers should not be destroyed during this + * kind of software reset. Therefore the XMAC Software Reset + * (XM_GP_RES_MAC bit in XM_GP_PORT) must not be used! + * + * The software reset is done by + * - disabling the Rx and Tx state machine, + * - resetting the statistics module, + * - clear all other significant XMAC Mode, + * Command, and Control Registers + * - clearing the Hash Register and the + * Exact Match Address registers, and + * - flushing the XMAC's Rx and Tx FIFOs. + * + * Note: + * Another requirement when stopping the XMAC is to + * avoid sending corrupted frames on the network. + * Disabling the Tx state machine will NOT interrupt + * the currently transmitted frame. But we must take care + * that the Tx FIFO is cleared AFTER the current frame + * is complete sent to the network. + * + * It takes about 12ns to send a frame with 1538 bytes. + * One PCI clock goes at least 15ns (66MHz). Therefore + * after reading XM_GP_PORT back, we are sure that the + * transmitter is disabled AND idle. And this means + * we may flush the transmit FIFO now. 
+ * + * Returns: + * nothing + */ +static void SkXmSoftRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 ZeroAddr[4] = {0x0000, 0x0000, 0x0000, 0x0000}; + + /* reset the statistics module */ + XM_OUT32(IoC, Port, XM_GP_PORT, XM_GP_RES_STAT); + + /* disable all XMAC IRQs */ + XM_OUT16(IoC, Port, XM_IMSK, 0xffff); + + XM_OUT32(IoC, Port, XM_MODE, 0); /* clear Mode Reg */ + + XM_OUT16(IoC, Port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + XM_OUT16(IoC, Port, XM_RX_CMD, 0); /* reset RX CMD Reg */ /* disable all PHY IRQs */ switch (pAC->GIni.GP[Port].PhyType) { case SK_PHY_BCOM: - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_INT_MASK, 0xffff); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff); break; +#ifdef OTHER_PHY case SK_PHY_LONE: - PHY_WRITE(IoC, pPrt, Port, PHY_LONE_INT_ENAB, 0x0); + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0); break; case SK_PHY_NAT: /* todo: National - PHY_WRITE(IoC, pPrt, Port, PHY_NAT_INT_MASK, - 0xffff); */ + SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */ break; +#endif /* OTHER_PHY */ } /* clear the Hash Register */ - SkXmClrHashAddr(pAC, IoC, Port); + XM_OUTHASH(IoC, Port, XM_HSM, &ZeroAddr); /* clear the Exact Match Address registers */ SkXmClrExactAddr(pAC, IoC, Port, 0, 15); - SkXmClrSrcCheck(pAC, IoC, Port); - - /* flush the XMAC's Rx and Tx FIFOs */ - SkXmFlushTxFifo(pAC, IoC, Port); - SkXmFlushRxFifo(pAC, IoC, Port); + + /* clear the Source Check Address registers */ + XM_OUTHASH(IoC, Port, XM_SRC_CHK, &ZeroAddr); - pAC->GIni.GP[Port].PState = SK_PRT_STOP; -} /* SkXmSoftRst*/ +} /* SkXmSoftRst */ /****************************************************************************** @@ -589,23 +1219,21 @@ * * Description: * The XMAC of the specified 'Port' and all connected devices - * (PHY and SERDES) will receive a reset signal on its *Reset - * pins. - * External PHYs must be reset be clearing a bit in the GPIO - * register (Timing requirements: Broadcom: 400ns, Level One: - * none, National: 80ns). + * (PHY and SERDES) will receive a reset signal on its *Reset pins. + * External PHYs must be reset be clearing a bit in the GPIO register + * (Timing requirements: Broadcom: 400ns, Level One: none, National: 80ns). * * ATTENTION: - * It is absolutely neccessary to reset the SW_RST Bit first + * It is absolutely necessary to reset the SW_RST Bit first * before calling this function. * * Returns: * nothing */ -void SkXmHardRst( +static void SkXmHardRst( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port) /* port to stop (MAC_1 + n) */ +int Port) /* Port Index (MAC_1 + n) */ { SK_U32 Reg; int i; @@ -613,14 +1241,12 @@ SK_U16 Word; for (i = 0; i < 4; i++) { - /* TX_MFF_CTRL1 is a 32 bit register but only the lowest 16 */ - /* bit contains buttoms to press */ - SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), (SK_U16)MFF_CLR_MAC_RST); + /* TX_MFF_CTRL1 has 32 bits, but only the lowest 16 bits are used */ + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); TOut = 0; do { - TOut ++; - if (TOut > 10000) { + if (TOut++ > 10000) { /* * Adapter seems to be in RESET state. * Registers cannot be written. 
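The reset loop in the hunk above keeps its existing safety net: the reset bit is set and read back in a bounded loop, so an adapter whose registers no longer accept writes makes the routine bail out instead of spinning forever. A standalone sketch of that bounded busy-wait; the register and bit are simulated, and on real hardware the marked lines would be SK_OUT16()/SK_IN16() accesses:

#include <stdio.h>

#define MAX_POLLS   10000
#define SET_MAC_RST 0x0001         /* stand-in for MFF_SET_MAC_RST */

static unsigned short mff_ctrl;    /* simulated TX_MFF_CTRL1 register */

static int assert_mac_reset(void)
{
	int polls = 0;

	do {
		if (polls++ > MAX_POLLS) {
			/* adapter seems stuck in reset; registers can't be written */
			return -1;
		}
		mff_ctrl |= SET_MAC_RST;           /* SK_OUT16() on real hardware */
	} while ((mff_ctrl & SET_MAC_RST) == 0);   /* re-read via SK_IN16() on real hardware */

	return 0;
}

int main(void)
{
	printf("reset %s\n", assert_mac_reset() == 0 ? "asserted" : "timed out");
	return 0;
}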
@@ -628,9 +1254,10 @@ return; } - SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), - (SK_U16) MFF_SET_MAC_RST); - SK_IN16(IoC,MR_ADDR(Port,TX_MFF_CTRL1), &Word); + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + + SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &Word); + } while ((Word & MFF_SET_MAC_RST) == 0); } @@ -652,16 +1279,151 @@ SK_IN32(IoC, B2_GP_IO, &Reg); } - pAC->GIni.GP[Port].PState = SK_PRT_RESET; } /* SkXmHardRst */ /****************************************************************************** * + * SkGmSoftRst() - Do a GMAC software reset + * + * Description: + * The GPHY registers should not be destroyed during this + * kind of software reset. + * + * Returns: + * nothing + */ +static void SkGmSoftRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 EmptyHash[4] = {0x0000, 0x0000, 0x0000, 0x0000}; + SK_U16 RxCtrl; + + /* reset the statistics module */ + + /* disable all GMAC IRQs */ + SK_OUT8(IoC, GMAC_IRQ_MSK, 0); + + /* disable all PHY IRQs */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0); + + /* clear the Hash Register */ + GM_OUTHASH(IoC, Port, GM_MC_ADDR_H1, &EmptyHash); + + /* Enable Unicast and Multicast filtering */ + GM_IN16(IoC, Port, GM_RX_CTRL, &RxCtrl); + + GM_OUT16(IoC, Port, GM_RX_CTRL, + RxCtrl | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + +} /* SkGmSoftRst */ + + +/****************************************************************************** + * + * SkGmHardRst() - Do a GMAC hardware reset + * + * Description: + * + * ATTENTION: + * It is absolutely necessary to reset the SW_RST Bit first + * before calling this function. + * + * Returns: + * nothing + */ +static void SkGmHardRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + /* set GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), GPC_RST_SET); + + /* set GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); + +} /* SkGmHardRst */ + + +/****************************************************************************** + * + * SkMacSoftRst() - Do a MAC software reset + * + * Description: calls a MAC software reset routine dep. on board type + * + * Returns: + * nothing + */ +void SkMacSoftRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + /* disable receiver and transmitter */ + SkMacRxTxDisable(pAC, IoC, Port); + + if (pAC->GIni.GIGenesis) { + + SkXmSoftRst(pAC, IoC, Port); + } + else { + + SkGmSoftRst(pAC, IoC, Port); + } + + /* flush the MAC's Rx and Tx FIFOs */ + SkMacFlushTxFifo(pAC, IoC, Port); + + SkMacFlushRxFifo(pAC, IoC, Port); + + pPrt->PState = SK_PRT_STOP; + +} /* SkMacSoftRst */ + + +/****************************************************************************** + * + * SkMacHardRst() - Do a MAC hardware reset + * + * Description: calls a MAC hardware reset routine dep. 
on board type + * + * Returns: + * nothing + */ +void SkMacHardRst( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + + if (pAC->GIni.GIGenesis) { + + SkXmHardRst(pAC, IoC, Port); + } + else { + + SkGmHardRst(pAC, IoC, Port); + } + + pAC->GIni.GP[Port].PState = SK_PRT_RESET; + +} /* SkMacHardRst */ + + + +/****************************************************************************** + * * SkXmInitMac() - Initialize the XMAC II * * Description: - * Initialize all the XMAC of the specified port. + * Initialize the XMAC of the specified port. * The XMAC must be reset or stopped before calling this function. * * Note: @@ -679,7 +1441,6 @@ SK_U32 Reg; int i; SK_U16 SWord; - SK_U16 PhyId; pPrt = &pAC->GIni.GP[Port]; @@ -687,10 +1448,11 @@ /* Port State: SK_PRT_STOP */ /* Verify that the reset bit is cleared */ SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &SWord); - if (SWord & (SK_U16)MFF_SET_MAC_RST) { + + if ((SWord & MFF_SET_MAC_RST) != 0) { /* PState does not match HW state */ SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG); - /* Correct it. */ + /* Correct it */ pPrt->PState = SK_PRT_RESET; } } @@ -701,115 +1463,84 @@ * Note: The SW reset is self clearing, therefore there is * nothing to do here. */ - SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), (SK_U16)MFF_CLR_MAC_RST); + SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); - /* Ensure that XMAC reset release is done (errata from LReinbold?). */ + /* Ensure that XMAC reset release is done (errata from LReinbold?) */ SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &SWord); - /* Clear PHY reset. */ - if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) { + /* Clear PHY reset */ + if (pPrt->PhyType != SK_PHY_XMAC) { + SK_IN32(IoC, B2_GP_IO, &Reg); + if (Port == 0) { - Reg |= GP_DIR_0; /* Set to output. */ - Reg |= GP_IO_0; + Reg |= (GP_DIR_0 | GP_IO_0); /* set to output */ } else { - Reg |= GP_DIR_2; /* Set to output. */ - Reg |= GP_IO_2; + Reg |= (GP_DIR_2 | GP_IO_2); /* set to output */ } SK_OUT32(IoC, B2_GP_IO, Reg); - /* Enable GMII interface. */ + /* Enable GMII interface */ XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_GMII_MD); - PHY_READ(IoC, pPrt, Port, PHY_XMAC_ID1, &PhyId); -#ifdef xDEBUG - if (SWord == 0xFFFF) { - i = 1; - do { - PHY_READ(IoC, pPrt, Port, PHY_XMAC_ID1, &SWord); - i++; - /* Limit retries; else machine may hang. */ - } while (SWord == 0xFFFF && i < 500000); - - CMSMPrintString( - pAC->pConfigTable, - MSG_TYPE_RUNTIME_INFO, - "ID1 is %x after %d reads.", - (void *)SWord, - (void *)i); - - /* Trigger PCI analyzer */ - /* SK_IN32(IoC, 0x012c, &Reg); */ - } -#endif /* DEBUG */ + /* read Id from external PHY (all have the same address) */ + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_ID1, &pPrt->PhyId1); /* * Optimize MDIO transfer by suppressing preamble. * Must be done AFTER first access to BCOM chip. */ XM_IN16(IoC, Port, XM_MMU_CMD, &SWord); + XM_OUT16(IoC, Port, XM_MMU_CMD, SWord | XM_MMU_NO_PRE); - if (PhyId == 0x6044) { - /* Workaround BCOM Errata for the C0 type. */ - /* Write magic patterns to reserved registers. */ + if (pPrt->PhyId1 == PHY_BCOM_ID1_C0) { + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ i = 0; while (BcomRegC0Hack[i].PhyReg != 0) { - PHY_WRITE(IoC, pPrt, Port, BcomRegC0Hack[i].PhyReg, + SkXmPhyWrite(pAC, IoC, Port, BcomRegC0Hack[i].PhyReg, BcomRegC0Hack[i].PhyVal); i++; } } - else if (PhyId == 0x6041) { - /* Workaround BCOM Errata for the A1 type. 
*/ - /* Write magic patterns to reserved registers. */ + else if (pPrt->PhyId1 == PHY_BCOM_ID1_A1) { + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ i = 0; while (BcomRegA1Hack[i].PhyReg != 0) { - PHY_WRITE(IoC, pPrt, Port, BcomRegA1Hack[i].PhyReg, + SkXmPhyWrite(pAC, IoC, Port, BcomRegA1Hack[i].PhyReg, BcomRegA1Hack[i].PhyVal); i++; } } - /* Workaround BCOM Errata (#10523) for all BCom PHYs. */ - /* Disable Power Management after reset. */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, &SWord); -#ifdef xDEBUG - if (SWord == 0xFFFF) { - i = 1; - do { - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, &SWord); - i++; - /* Limit retries; else machine may hang. */ - } while (SWord == 0xFFFF && i < 500000); - - CMSMPrintString( - pAC->pConfigTable, - MSG_TYPE_RUNTIME_INFO, - "AUX_CTRL is %x after %d reads.", - (void *)SWord, - (void *)i); - - /* Trigger PCI analyzer */ - /* SK_IN32(IoC, 0x012c, &Reg); */ - } -#endif /* DEBUG */ - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, - SWord | PHY_B_AC_DIS_PM); + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. + */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord); + + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, + (SK_U16)(SWord | PHY_B_AC_DIS_PM)); - /* PHY LED initialization is done in SkGeXmitLED(), not here. */ + /* PHY LED initialization is done in SkGeXmitLED() */ } /* Dummy read the Interrupt source register */ XM_IN16(IoC, Port, XM_ISRC, &SWord); /* - * The autonegotiation process starts immediately after - * clearing the reset. The autonegotiation process should be + * The auto-negotiation process starts immediately after + * clearing the reset. The auto-negotiation process should be * started by the SIRQ, therefore stop it here immediately. */ - SkXmInitPhy(pAC, IoC, Port, SK_FALSE); + SkMacInitPhy(pAC, IoC, Port, SK_FALSE); #if 0 /* temp. code: enable signal detect */ @@ -820,15 +1551,16 @@ /* * configure the XMACs Station Address - * B2_MAC_2 = xx xx xx xx xx x1 is programed to XMAC A - * B2_MAC_3 = xx xx xx xx xx x2 is programed to XMAC B + * B2_MAC_2 = xx xx xx xx xx x1 is programmed to XMAC A + * B2_MAC_3 = xx xx xx xx xx x2 is programmed to XMAC B */ for (i = 0; i < 3; i++) { /* * The following 2 statements are together endianess - * independant. Remember this when changing. + * independent. Remember this when changing. */ SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord); + XM_OUT16(IoC, Port, (XM_SA + i * 2), SWord); } @@ -840,49 +1572,54 @@ /* Rx Low Water Mark (XM_RX_LO_WM): use default */ /* configure Rx High Water Mark (XM_RX_HI_WM) */ - XM_OUT16(IoC, Port, XM_RX_HI_WM, 0x05aa); + XM_OUT16(IoC, Port, XM_RX_HI_WM, SK_XM_RX_HI_WM); + + /* Configure Tx Request Threshold */ + SWord = SK_XM_THR_SL; /* for single port */ if (pAC->GIni.GIMacsFound > 1) { switch (pAC->GIni.GIPortUsage) { case SK_RED_LINK: - /* Configure Tx Request Threshold for red. link */ - XM_OUT16(IoC, Port, XM_TX_THR, SK_XM_THR_REDL); + SWord = SK_XM_THR_REDL; /* redundant link */ break; case SK_MUL_LINK: - /* Configure Tx Request Threshold for load bal. 
*/ - XM_OUT16(IoC, Port, XM_TX_THR, SK_XM_THR_MULL); + SWord = SK_XM_THR_MULL; /* load balancing */ break; case SK_JUMBO_LINK: - /* Configure Tx Request Threshold for jumbo frames */ - XM_OUT16(IoC, Port, XM_TX_THR, SK_XM_THR_JUMBO); + SWord = SK_XM_THR_JUMBO; /* jumbo frames */ break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E014, - SKERR_HWI_E014MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E014, SKERR_HWI_E014MSG); break; } } - else { - /* Configure Tx Request Threshold for single port */ - XM_OUT16(IoC, Port, XM_TX_THR, SK_XM_THR_SL); - } + XM_OUT16(IoC, Port, XM_TX_THR, SWord); - /* - * setup register defaults for the Rx Command Register - * - Enable Automatic Frame Padding on Tx side - */ + /* setup register defaults for the Tx Command Register */ XM_OUT16(IoC, Port, XM_TX_CMD, XM_TX_AUTO_PAD); - /* - * setup register defaults for the Rx Command Register, - * program value of PRxCmd - */ - XM_OUT16(IoC, Port, XM_RX_CMD, pPrt->PRxCmd); + /* setup register defaults for the Rx Command Register */ + SWord = XM_RX_STRIP_FCS | XM_RX_LENERR_OK; + + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + SWord |= XM_RX_BIG_PK_OK; + } + + if (pPrt->PLinkModeConf == SK_LMODE_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + SWord |= XM_RX_DIS_CEXT; + } + + XM_OUT16(IoC, Port, XM_RX_CMD, SWord); /* * setup register defaults for the Mode Register * - Don't strip error frames to avoid Store & Forward - * on the rx side. + * on the Rx side. * - Enable 'Check Station Address' bit * - Enable 'Check Address Array' bit */ @@ -906,15 +1643,226 @@ * Do NOT init XMAC interrupt mask here. * All interrupts remain disable until link comes up! */ - pPrt->PState = SK_PRT_INIT; /* * Any additional configuration changes may be done now. - * The last action is to enable the rx and tx state machine. - * This should be done after the autonegotiation process + * The last action is to enable the Rx and Tx state machine. + * This should be done after the auto-negotiation process * has been completed successfully. */ -} /* SkXmInitMac*/ +} /* SkXmInitMac */ + +/****************************************************************************** + * + * SkGmInitMac() - Initialize the GMAC + * + * Description: + * Initialize the GMAC of the specified port. + * The GMAC must be reset or stopped before calling this function. + * + * Note: + * The GMAC's Rx and Tx state machine is still disabled when returning. 
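The Rx Command defaults in the hunk a few lines above are now assembled in a local word and written once: FCS stripping and tolerance of in-range length errors always, the big-packet bit when the port carries jumbo frames, and "ignore missing carrier extension" when the link is forced to half duplex (the peer might still run full duplex). A standalone sketch of that assembly with stand-in bit values:

#include <stdio.h>

#define RX_STRIP_FCS  0x0001   /* illustrative values, not the XM_RX_* bits */
#define RX_LENERR_OK  0x0002
#define RX_BIG_PK_OK  0x0004
#define RX_DIS_CEXT   0x0008

static unsigned short rx_cmd_defaults(int jumbo_link, int manual_half_duplex)
{
	unsigned short cmd = RX_STRIP_FCS | RX_LENERR_OK;

	if (jumbo_link) {
		/* accept frames larger than 1514 bytes */
		cmd |= RX_BIG_PK_OK;
	}

	if (manual_half_duplex) {
		/* the peer may be in full duplex; don't flag frames that
		 * arrive without a carrier extension */
		cmd |= RX_DIS_CEXT;
	}

	return cmd;   /* caller issues one register write with this value */
}

int main(void)
{
	printf("Rx command defaults: 0x%04x\n",
	       (unsigned)rx_cmd_defaults(1, 0));
	return 0;
}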
+ * + * Returns: + * nothing + */ +void SkGmInitMac( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + int i; + SK_U16 SWord; + SK_U32 DWord; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PState == SK_PRT_STOP) { + /* Port State: SK_PRT_STOP */ + /* Verify that the reset bit is cleared */ + SK_IN32(IoC, MR_ADDR(Port, GMAC_CTRL), &DWord); + + if ((DWord & GMC_RST_SET) != 0) { + /* PState does not match HW state */ + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG); + /* Correct it */ + pPrt->PState = SK_PRT_RESET; + } + } + + if (pPrt->PState == SK_PRT_RESET) { + /* set GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), GPC_RST_SET); + + /* set GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); + + /* clear GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_CLR); + + /* set GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET); + + /* set HWCFG_MODE */ + DWord = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE | + (pAC->GIni.GICopperType ? GPC_HWCFG_GMII_COP : + GPC_HWCFG_GMII_FIB); + + /* set GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_SET); + + /* release GPHY Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_CLR); + + /* clear GMAC Control reset */ + SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + + /* Dummy read the Interrupt source register */ + SK_IN16(IoC, GMAC_IRQ_SRC, &SWord); + +#ifndef VCPU + /* read Id from PHY */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_ID1, &pPrt->PhyId1); + + SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE); +#endif /* VCPU */ + } + + (void)SkGmResetCounter(pAC, IoC, Port); + + SWord = 0; + + /* speed settings */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + /* auto update for speed is already set */ + break; + case SK_LSPEED_1000MBPS: + SWord |= GM_GPCR_SPEED_1000; + break; + case SK_LSPEED_100MBPS: + SWord |= GM_GPCR_SPEED_100; + break; + case SK_LSPEED_10MBPS: + break; + } + + /* duplex settings */ + if (pPrt->PLinkModeConf == SK_LMODE_FULL || + pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE) { + + SWord |= GM_GPCR_DUP_FULL; + } + + /* flow control settings */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + /* disable auto-neg of flow control */ + SWord |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS; + break; + case SK_FLOW_MODE_LOC_SEND: + SWord |= GM_GPCR_FC_RX_DIS; + break; + case SK_FLOW_MODE_SYMMETRIC: + /* TBD */ + case SK_FLOW_MODE_SYM_OR_REM: + /* do nothing means to enable autoneg for flowcontrol and */ + /* enable rx and tx of pause frames */ + break; + } + + /* setup General Purpose Control Register */ + GM_OUT16(IoC, Port, GM_GP_CTRL, SWord); + + /* setup Transmit Control Register */ + GM_OUT16(IoC, Port, GM_TX_CTRL, GM_TXCR_COL_THR); + + /* setup Receive Control Register */ + GM_OUT16(IoC, Port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA | + GM_RXCR_CRC_DIS); + + /* setup Transmit Flow Control Register */ + GM_OUT16(IoC, Port, GM_TX_FLOW_CTRL, 0xffff); + + /* setup Transmit Parameter Register */ +#ifdef VCPU + GM_IN16(IoC, Port, GM_TX_PARAM, &SWord); +#endif /* VCPU */ + + SWord = JAM_LEN_VAL(3) | JAM_IPG_VAL(11) | IPG_JAM_DATA(26); + + GM_OUT16(IoC, Port, GM_TX_PARAM, SWord); + + /* configure the Serial Mode Register */ +#ifdef VCPU + GM_IN16(IoC, Port, GM_SERIAL_MODE, &SWord); +#endif /* VCPU */ + + SWord = GM_SMOD_VLAN_ENA | 
IPG_VAL_FAST_ETH; + + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + /* enable jumbo mode (Max. Frame Length = 9018) */ + SWord |= GM_SMOD_JUMBO_ENA; + } + + GM_OUT16(IoC, Port, GM_SERIAL_MODE, SWord); + + /* + * configure the GMACs Station Addresses + * in PROM you can find our addresses at: + * B2_MAC_1 = xx xx xx xx xx x0 virtual address + * B2_MAC_2 = xx xx xx xx xx x1 is programmed to GMAC A + * B2_MAC_3 = xx xx xx xx xx x2 is reserved for DualPort + */ + + for (i = 0; i < 3; i++) { + /* + * The following 2 statements are together endianess + * independent. Remember this when changing. + */ + /* physical address: will be used for pause frames */ + SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord); + +#ifdef WA_DEV_16 + /* WA for deviation #16 */ + if (pAC->GIni.GIChipRev == 0) { + /* swap the address bytes */ + SWord = ((SWord & 0xff00) >> 8) | ((SWord & 0x00ff) << 8); + + /* write to register in reversed order */ + GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + (2 - i) * 4), SWord); + } + else { + GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord); + } +#else + GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord); +#endif /* WA_DEV_16 */ + + /* virtual address: will be used for data */ + SK_IN16(IoC, (B2_MAC_1 + Port * 8 + i * 2), &SWord); + + GM_OUT16(IoC, Port, (GM_SRC_ADDR_2L + i * 4), SWord); + } + + /* reset all Multicast filtering Hash registers */ + GM_OUT16(IoC, Port, GM_MC_ADDR_H1, 0); + GM_OUT16(IoC, Port, GM_MC_ADDR_H2, 0); + GM_OUT16(IoC, Port, GM_MC_ADDR_H3, 0); + GM_OUT16(IoC, Port, GM_MC_ADDR_H4, 0); + + /* enable interrupt mask for counter overflows */ + GM_OUT16(IoC, Port, GM_TX_IRQ_MSK, 0); + GM_OUT16(IoC, Port, GM_RX_IRQ_MSK, 0); + GM_OUT16(IoC, Port, GM_TR_IRQ_MSK, 0); + +#ifdef VCPU + /* read General Purpose Status */ + GM_IN16(IoC, Port, GM_GP_STAT, &SWord); +#endif /* VCPU */ +} /* SkGmInitMac */ /****************************************************************************** @@ -922,9 +1870,9 @@ * SkXmInitDupMd() - Initialize the XMACs Duplex Mode * * Description: - * This function initilaizes the XMACs Duplex Mode. + * This function initializes the XMACs Duplex Mode. * It should be called after successfully finishing - * the Autonegotiation Process + * the Auto-negotiation Process * * Returns: * nothing @@ -939,9 +1887,9 @@ case SK_LMODE_STAT_HALF: /* Configuration Actions for Half Duplex Mode */ /* - * XM_BURST = default value. We are propable not quick + * XM_BURST = default value. We are probable not quick * enough at the 'XMAC' bus to burst 8kB. - * The XMAC stopps bursting if no transmit frames + * The XMAC stops bursting if no transmit frames * are available or the burst limit is exceeded. */ /* XM_TX_RT_LIM = default value (15) */ @@ -969,10 +1917,10 @@ * SkXmInitPauseMd() - initialize the Pause Mode to be used for this port * * Description: - * This function initilaizes the Pause Mode which should + * This function initializes the Pause Mode which should * be used for this port. 
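The WA_DEV_16 block in SkGmInitMac() above is easy to misread: on chip revision 0 each 16-bit word of the physical station address is byte-swapped and the three words are written to the source-address registers in reverse order. A standalone sketch of just that transformation, with the register file simulated:

#include <stdint.h>
#include <stdio.h>

static uint16_t src_addr[3];   /* simulated GM_SRC_ADDR_1L.. register words */

static void write_station_addr(const uint16_t mac_words[3], int chip_rev)
{
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t w = mac_words[i];

		if (chip_rev == 0) {
			/* swap the two address bytes ... */
			w = (uint16_t)(((w & 0xff00) >> 8) | ((w & 0x00ff) << 8));
			/* ... and write the words in reversed register order */
			src_addr[2 - i] = w;
		}
		else {
			src_addr[i] = w;
		}
	}
}

int main(void)
{
	const uint16_t mac[3] = { 0x0011, 0x2233, 0x4455 };

	write_station_addr(mac, 0);
	printf("%04x %04x %04x\n", (unsigned)src_addr[0],
	       (unsigned)src_addr[1], (unsigned)src_addr[2]);
	return 0;
}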
* It should be called after successfully finishing - * the Autonegotiation Process + * the Auto-negotiation Process * * Returns: * nothing @@ -988,22 +1936,26 @@ pPrt = &pAC->GIni.GP[Port]; + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_NONE || pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) { /* Disable Pause Frame Reception */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Word); - XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_IGN_PF); + Word |= XM_MMU_IGN_PF; } else { /* - * enabling pause frame reception is required for 1000BT + * enabling pause frame reception is required for 1000BT * because the XMAC is not reset if the link is going down */ /* Enable Pause Frame Reception */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Word); - XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~XM_MMU_IGN_PF); + Word &= ~XM_MMU_IGN_PF; } + + XM_OUT16(IoC, Port, XM_MMU_CMD, Word); + + XM_IN32(IoC, Port, XM_MODE, &DWord); if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_SYMMETRIC || pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) { @@ -1011,11 +1963,10 @@ /* * Configure Pause Frame Generation * Use internal and external Pause Frame Generation. - * Sending pause frames is edge triggert. Send a - * Pause frame with the maximum pause time if - * internal oder external FIFO full condition - * occurs. Send a zero pause time frame to - * start transmission again. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * internal oder external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. */ /* XM_PAUSE_DA = '010000C28001' (default) */ @@ -1025,71 +1976,32 @@ XM_OUT16(IoC, Port, XM_MAC_PTIME, 0xffff); /* Set Pause Mode in Mode Register */ - XM_IN32(IoC, Port, XM_MODE, &DWord); - XM_OUT32(IoC, Port, XM_MODE, DWord | XM_PAUSE_MODE); + DWord |= XM_PAUSE_MODE; /* Set Pause Mode in MAC Rx FIFO */ - SK_OUT16(IoC, MR_ADDR(Port,RX_MFF_CTRL1), MFF_ENA_PAUSE); + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_PAUSE); } else { /* - * disable pause frame generation is required for 1000BT + * disable pause frame generation is required for 1000BT * because the XMAC is not reset if the link is going down */ /* Disable Pause Mode in Mode Register */ - XM_IN32(IoC, Port, XM_MODE, &DWord); - XM_OUT32(IoC, Port, XM_MODE, DWord & ~XM_PAUSE_MODE); + DWord &= ~XM_PAUSE_MODE; /* Disable Pause Mode in MAC Rx FIFO */ - SK_OUT16(IoC, MR_ADDR(Port,RX_MFF_CTRL1), MFF_DIS_PAUSE); + SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_DIS_PAUSE); } + + XM_OUT32(IoC, Port, XM_MODE, DWord); } /* SkXmInitPauseMd*/ /****************************************************************************** * - * SkXmInitPhy() - Initialize the XMAC II Phy registers - * - * Description: - * Initialize all the XMACs Phy registers - * - * Note: - * - * Returns: - * nothing - */ -void SkXmInitPhy( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ -SK_BOOL DoLoop) /* Should a Phy LOOback be set-up? 
*/ -{ - SK_GEPORT *pPrt; - - pPrt = &pAC->GIni.GP[Port]; - switch (pPrt->PhyType) { - case SK_PHY_XMAC: - SkXmInitPhyXmac(pAC, IoC, Port, DoLoop); - break; - case SK_PHY_BCOM: - SkXmInitPhyBcom(pAC, IoC, Port, DoLoop); - break; - case SK_PHY_LONE: - SkXmInitPhyLone(pAC, IoC, Port, DoLoop); - break; - case SK_PHY_NAT: - SkXmInitPhyNat(pAC, IoC, Port, DoLoop); - break; - } -} /* SkXmInitPhy*/ - - -/****************************************************************************** - * - * SkXmInitPhyXmac() - Initialize the XMAC II Phy registers + * SkXmInitPhyXmac() - Initialize the XMAC Phy registers * - * Description: - * Initialize all the XMACs Phy registers + * Description: initializes all the XMACs Phy registers * * Note: * @@ -1100,32 +2012,32 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ -SK_BOOL DoLoop) /* Should a Phy LOOback be set-up? */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ { SK_GEPORT *pPrt; SK_U16 Ctrl; pPrt = &pAC->GIni.GP[Port]; - - /* Autonegotiation ? */ - if (pPrt->PLinkMode == SK_LMODE_HALF || - pPrt->PLinkMode == SK_LMODE_FULL) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("InitPhyXmac: no autonegotiation Port %d\n", Port)); - /* No Autonegiotiation */ + Ctrl = 0; + + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyXmac: no auto-negotiation Port %d\n", Port)); /* Set DuplexMode in Config register */ - Ctrl = (pPrt->PLinkMode == SK_LMODE_FULL ? PHY_CT_DUP_MD : 0); + if (pPrt->PLinkMode == SK_LMODE_FULL) { + Ctrl |= PHY_CT_DUP_MD; + } /* - * Do NOT enable Autonegotiation here. This would hold - * the link down because no IDLES are transmitted + * Do NOT enable Auto-negotiation here. 
This would hold + * the link down because no IDLEs are transmitted */ } else { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("InitPhyXmac: with autonegotiation Port %d\n", Port)); - /* Set Autonegotiation advertisement */ - Ctrl = 0; + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyXmac: with auto-negotiation Port %d\n", Port)); + /* Set Auto-negotiation advertisement */ /* Set Full/half duplex capabilities */ switch (pPrt->PLinkMode) { @@ -1139,8 +2051,8 @@ Ctrl |= PHY_X_AN_FD | PHY_X_AN_HD; break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E015, SKERR_HWI_E015MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); } switch (pPrt->PFlowCtrlMode) { @@ -1157,14 +2069,14 @@ Ctrl |= PHY_X_P_BOTH_MD; break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E016, SKERR_HWI_E016MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); } /* Write AutoNeg Advertisement Register */ - PHY_WRITE(IoC, pPrt, Port, PHY_XMAC_AUNE_ADV, Ctrl); + SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_AUNE_ADV, Ctrl); - /* Restart Autonegotiation */ + /* Restart Auto-negotiation */ Ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; } @@ -1174,16 +2086,15 @@ } /* Write to the Phy control register */ - PHY_WRITE(IoC, pPrt, Port, PHY_XMAC_CTRL, Ctrl); -} /* SkXmInitPhyXmac*/ + SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_CTRL, Ctrl); +} /* SkXmInitPhyXmac */ /****************************************************************************** * * SkXmInitPhyBcom() - Initialize the Broadcom Phy registers * - * Description: - * Initialize all the Broadcom Phy registers + * Description: initializes all the Broadcom Phy registers * * Note: * @@ -1194,7 +2105,7 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ -SK_BOOL DoLoop) /* Should a Phy LOOback be set-up? */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ { SK_GEPORT *pPrt; SK_U16 Ctrl1; @@ -1203,7 +2114,7 @@ SK_U16 Ctrl4; SK_U16 Ctrl5; - Ctrl1 = PHY_B_CT_SP1000; + Ctrl1 = PHY_CT_SP1000; Ctrl2 = 0; Ctrl3 = PHY_SEL_TYPE; Ctrl4 = PHY_B_PEC_EN_LTR; @@ -1213,120 +2124,452 @@ /* manually Master/Slave ? */ if (pPrt->PMSMode != SK_MS_MODE_AUTO) { - Ctrl2 |= PHY_B_1000C_MSE; + Ctrl2 |= PHY_B_1000C_MSE; + + if (pPrt->PMSMode == SK_MS_MODE_MASTER) { + Ctrl2 |= PHY_B_1000C_MSC; + } + } + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyBcom: no auto-negotiation Port %d\n", Port)); + /* Set DuplexMode in Config register */ + Ctrl1 |= (pPrt->PLinkMode == SK_LMODE_FULL ? PHY_CT_DUP_MD : 0); + + /* Determine Master/Slave manually if not already done */ + if (pPrt->PMSMode == SK_MS_MODE_AUTO) { + Ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */ + } + + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLES are transmitted + */ + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyBcom: with auto-negotiation Port %d\n", Port)); + /* Set Auto-negotiation advertisement */ + + /* + * Workaround BCOM Errata #1 for the C5 type. 
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + Ctrl2 |= PHY_B_1000C_RD; + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + Ctrl2 |= PHY_B_1000C_AHD; + break; + case SK_LMODE_AUTOFULL: + Ctrl2 |= PHY_B_1000C_AFD; + break; + case SK_LMODE_AUTOBOTH: + Ctrl2 |= PHY_B_1000C_AFD | PHY_B_1000C_AHD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + Ctrl3 |= PHY_B_P_NO_PAUSE; + break; + case SK_FLOW_MODE_LOC_SEND: + Ctrl3 |= PHY_B_P_ASYM_MD; + break; + case SK_FLOW_MODE_SYMMETRIC: + Ctrl3 |= PHY_B_P_SYM_MD; + break; + case SK_FLOW_MODE_SYM_OR_REM: + Ctrl3 |= PHY_B_P_BOTH_MD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + + /* Restart Auto-negotiation */ + Ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG; + } + + /* Initialize LED register here? */ + /* No. Please do it in SkDgXmitLed() (if required) and swap + init order of LEDs and XMAC. (MAl) */ + + /* Write 1000Base-T Control Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, Ctrl2); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("1000B-T Ctrl Reg=0x%04X\n", Ctrl2)); + + /* Write AutoNeg Advertisement Register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, Ctrl3); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Neg. Adv. Reg=0x%04X\n", Ctrl3)); + + if (DoLoop) { + /* Set the Phy Loopback bit, too */ + Ctrl1 |= PHY_CT_LOOP; + } + + if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { + /* configure FIFO to high latency for transmission of ext. packets */ + Ctrl4 |= PHY_B_PEC_HIGH_LA; + + /* configure reception of extended packets */ + Ctrl5 |= PHY_B_AC_LONG_PACK; + + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, Ctrl5); + } + + /* Configure LED Traffic Mode and Jumbo Frame usage if specified */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, Ctrl4); + + /* Write to the Phy control register */ + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, Ctrl1); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Control Reg=0x%04X\n", Ctrl1)); +} /* SkXmInitPhyBcom */ + + +/****************************************************************************** + * + * SkGmInitPhyMarv() - Initialize the Marvell Phy registers + * + * Description: initializes all the Marvell Phy registers + * + * Note: + * + * Returns: + * nothing + */ +static void SkGmInitPhyMarv( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ +{ + SK_GEPORT *pPrt; + SK_U16 PhyCtrl; + SK_U16 PhyStat; + SK_U16 PhyStat1; + SK_U16 PhySpec; + SK_U16 C1000BaseT; + SK_U16 AutoNegAdv; + SK_U16 ExtPhyCtrl; + +#ifdef VCPU + VCPUprintf(0, "SkGmInitPhyMarv(), Port=%u, DoLoop=%u\n", + Port, DoLoop); +#else /* VCPU */ + + if (!DoLoop) { + /* Read Ext. 
PHY Specific Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); + + ExtPhyCtrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + + ExtPhyCtrl |= PHY_M_EC_M_DSC(1) | PHY_M_EC_S_DSC(1) | + PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, ExtPhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Ext.PHYCtrl=0x%04X\n", ExtPhyCtrl)); + + /* Read PHY Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); + + /* Assert software reset */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl | PHY_CT_RESET); + } +#endif /* VCPU */ + + pPrt = &pAC->GIni.GP[Port]; + + PhyCtrl = PHY_CT_COL_TST; + C1000BaseT = 0; + AutoNegAdv = PHY_SEL_TYPE; + + /* manually Master/Slave ? */ + if (pPrt->PMSMode != SK_MS_MODE_AUTO) { + /* enable Manual Master/Slave */ + C1000BaseT |= PHY_M_1000C_MSE; + if (pPrt->PMSMode == SK_MS_MODE_MASTER) { - Ctrl2 |= PHY_B_1000C_MSC; + C1000BaseT |= PHY_M_1000C_MSC; /* set it to Master */ } } - /* Autonegotiation ? */ - if (pPrt->PLinkMode == SK_LMODE_HALF || - pPrt->PLinkMode == SK_LMODE_FULL) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("InitPhyBcom: no autonegotiation Port %d\n", Port)); - /* No Autonegiotiation */ - /* Set DuplexMode in Config register */ - Ctrl1 |= (pPrt->PLinkMode == SK_LMODE_FULL ? PHY_CT_DUP_MD : 0); + + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyMarv: no auto-negotiation Port %d\n", Port)); + + if (pPrt->PLinkMode == SK_LMODE_FULL) { + /* Set Full Duplex Mode */ + PhyCtrl |= PHY_CT_DUP_MD; + } - /* Determine Master/Slave manually if not already done. */ + /* Set Master/Slave manually if not already done */ if (pPrt->PMSMode == SK_MS_MODE_AUTO) { - Ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */ + C1000BaseT |= PHY_M_1000C_MSE; /* set it to Slave */ } - /* - * Do NOT enable Autonegotiation here. This would hold - * the link down because no IDLES are transmitted - */ - } - else { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("InitPhyBcom: with autonegotiation Port %d\n", Port)); - /* Set Autonegotiation advertisement */ - - /* Set Full/half duplex capabilities */ - switch (pPrt->PLinkMode) { - case SK_LMODE_AUTOHALF: - Ctrl2 |= PHY_B_1000C_AHD; + /* Set Speed */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + case SK_LSPEED_1000MBPS: + PhyCtrl |= PHY_CT_SP1000; break; - case SK_LMODE_AUTOFULL: - Ctrl2 |= PHY_B_1000C_AFD; + case SK_LSPEED_100MBPS: + PhyCtrl |= PHY_CT_SP100; break; - case SK_LMODE_AUTOBOTH: - Ctrl2 |= PHY_B_1000C_AFD | PHY_B_1000C_AHD; + case SK_LSPEED_10MBPS: break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E015, SKERR_HWI_E015MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019, + SKERR_HWI_E019MSG); } - switch (pPrt->PFlowCtrlMode) { - case SK_FLOW_MODE_NONE: - Ctrl3 |= PHY_B_P_NO_PAUSE; - break; - case SK_FLOW_MODE_LOC_SEND: - Ctrl3 |= PHY_B_P_ASYM_MD; - break; - case SK_FLOW_MODE_SYMMETRIC: - Ctrl3 |= PHY_B_P_SYM_MD; - break; - case SK_FLOW_MODE_SYM_OR_REM: - Ctrl3 |= PHY_B_P_BOTH_MD; - break; - default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E016, SKERR_HWI_E016MSG); + if (!DoLoop) { + PhyCtrl |= PHY_CT_RESET; } + /* + * Do NOT enable Auto-negotiation here. 
This would hold + * the link down because no IDLES are transmitted + */ + } + else { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyMarv: with auto-negotiation Port %d\n", Port)); + + PhyCtrl |= PHY_CT_ANE; + + if (pAC->GIni.GICopperType) { + /* Set Speed capabilities */ + switch (pPrt->PLinkSpeed) { + case SK_LSPEED_AUTO: + C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD; + AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD | + PHY_M_AN_10_FD | PHY_M_AN_10_HD; + break; + case SK_LSPEED_1000MBPS: + C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD; + break; + case SK_LSPEED_100MBPS: + AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD | + PHY_M_AN_10_FD | PHY_M_AN_10_HD; + break; + case SK_LSPEED_10MBPS: + AutoNegAdv |= PHY_M_AN_10_FD | PHY_M_AN_10_HD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019, + SKERR_HWI_E019MSG); + } - /* Restart Autonegotiation */ - Ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG; + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + C1000BaseT &= ~PHY_M_1000C_AFD; + AutoNegAdv &= ~(PHY_M_AN_100_FD | PHY_M_AN_10_FD); + break; + case SK_LMODE_AUTOFULL: + C1000BaseT &= ~PHY_M_1000C_AHD; + AutoNegAdv &= ~(PHY_M_AN_100_HD | PHY_M_AN_10_HD); + break; + case SK_LMODE_AUTOBOTH: + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Auto-negotiation advertisement */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + AutoNegAdv |= PHY_B_P_NO_PAUSE; + break; + case SK_FLOW_MODE_LOC_SEND: + AutoNegAdv |= PHY_B_P_ASYM_MD; + break; + case SK_FLOW_MODE_SYMMETRIC: + AutoNegAdv |= PHY_B_P_SYM_MD; + break; + case SK_FLOW_MODE_SYM_OR_REM: + AutoNegAdv |= PHY_B_P_BOTH_MD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + } + else { /* special defines for FIBER (88E1011S only) */ + + /* Set Full/half duplex capabilities */ + switch (pPrt->PLinkMode) { + case SK_LMODE_AUTOHALF: + AutoNegAdv |= PHY_M_AN_1000X_AHD; + break; + case SK_LMODE_AUTOFULL: + AutoNegAdv |= PHY_M_AN_1000X_AFD; + break; + case SK_LMODE_AUTOBOTH: + AutoNegAdv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); + } + + /* Set Auto-negotiation advertisement */ + switch (pPrt->PFlowCtrlMode) { + case SK_FLOW_MODE_NONE: + AutoNegAdv |= PHY_M_P_NO_PAUSE_X; + break; + case SK_FLOW_MODE_LOC_SEND: + AutoNegAdv |= PHY_M_P_ASYM_MD_X; + break; + case SK_FLOW_MODE_SYMMETRIC: + AutoNegAdv |= PHY_M_P_SYM_MD_X; + break; + case SK_FLOW_MODE_SYM_OR_REM: + AutoNegAdv |= PHY_M_P_BOTH_MD_X; + break; + default: + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); + } + } + if (!DoLoop) { + /* Restart Auto-negotiation */ + PhyCtrl |= PHY_CT_RE_CFG; + } } - /* Initialize LED register here? */ - /* No. Please do it in SkDgXmitLed() (if required) and swap - init order of LEDs and XMAC. 
(MAl) */ +#ifdef VCPU + /* + * E-mail from Gu Lin (08-03-2002): + */ + + /* Program PHY register 30 as 16'h0708 for simulation speed up */ + SkGmPhyWrite(pAC, IoC, Port, 30, 0x0708); + +#if 0 + /* Program PHY register 20 as 16'h2070 */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, 0x2070); +#endif /* 0 */ + + VCpuWait(2000); + +#else /* VCPU */ /* Write 1000Base-T Control Register */ - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_1000T_CTRL, Ctrl2); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("1000Base-T Control Reg = %x\n", Ctrl2)); + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_1000T_CTRL, C1000BaseT); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("1000B-T Ctrl=0x%04X\n", C1000BaseT)); /* Write AutoNeg Advertisement Register */ - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_AUNE_ADV, Ctrl3); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNeg Advertisment Reg = %x\n", Ctrl3)); + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV, AutoNegAdv); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Neg.Ad.=0x%04X\n", AutoNegAdv)); +#endif /* VCPU */ - if (DoLoop) { - /* Set the Phy Loopback bit, too */ - Ctrl1 |= PHY_CT_LOOP; - } - - if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { - /* configure fifo to high latency for xmission of ext. packets*/ - Ctrl4 |= PHY_B_PEC_HIGH_LA; + /* Set the PHY Loopback bit */ + PhyCtrl |= PHY_CT_LOOP; - /* configure reception of extended packets */ - Ctrl5 |= PHY_B_AC_LONG_PACK; + /* Program PHY register 16 as 16'h0400 to force link good */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_FL_GOOD); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, Ctrl5); +#if 0 + if (pPrt->PLinkSpeed != SK_LSPEED_AUTO) { + /* Write Ext. PHY Specific Control */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, + (SK_U16)((pPrt->PLinkSpeed + 2) << 4)); + } + } + else if (pPrt->PLinkSpeed == SK_LSPEED_10MBPS) { + /* Write PHY Specific Control */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_EN_DET_MSK); + } +#endif /* 0 */ } - /* Configure LED Traffic Mode and Jumbo Frame usage if specified */ - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_P_EXT_CTRL, Ctrl4); + /* Write to the PHY Control register */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl); + +#ifdef VCPU + VCpuWait(2000); +#endif /* VCPU */ + +#ifdef SK_DIAG + c_print("PHY Ctrl Val=0x%04X\n", PhyCtrl); + c_print("1000 B-T Val=0x%04X\n", C1000BaseT); + c_print("Auto-Neg Val=0x%04X\n", AutoNegAdv); + c_print("Ext Ctrl Val=0x%04X\n", ExtPhyCtrl); +#endif /* SK_DIAG */ + +#ifndef VCPU + /* Read PHY Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Ctrl Reg.=0x%04X\n", PhyCtrl)); - /* Write to the Phy control register */ - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_CTRL, Ctrl1); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("PHY Control Reg = %x\n", Ctrl1)); -} /* SkXmInitPhyBcom */ + /* Read 1000Base-T Control Register */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_CTRL, &C1000BaseT); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("1000B-T Ctrl =0x%04X\n", C1000BaseT)); + + /* Read AutoNeg Advertisement Register */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &AutoNegAdv); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Neg. Ad.=0x%04X\n", AutoNegAdv)); + + /* Read Ext. 
PHY Specific Control */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Ext PHY Ctrl=0x%04X\n", ExtPhyCtrl)); + + /* Read PHY Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Stat Reg.=0x%04X\n", PhyStat)); + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat1); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Stat Reg.=0x%04X\n", PhyStat1)); + + /* Read PHY Specific Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpec); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Spec Stat=0x%04X\n", PhySpec)); +#endif /* VCPU */ + +#ifdef SK_DIAG + c_print("PHY Ctrl Reg=0x%04X\n", PhyCtrl); + c_print("PHY 1000 Reg=0x%04X\n", C1000BaseT); + c_print("PHY AnAd Reg=0x%04X\n", AutoNegAdv); + c_print("Ext Ctrl Reg=0x%04X\n", ExtPhyCtrl); + c_print("PHY Stat Reg=0x%04X\n", PhyStat); + c_print("PHY Stat Reg=0x%04X\n", PhyStat1); + c_print("PHY Spec Reg=0x%04X\n", PhySpec); +#endif /* SK_DIAG */ + +} /* SkGmInitPhyMarv */ +#ifdef OTHER_PHY /****************************************************************************** * * SkXmInitPhyLone() - Initialize the Level One Phy registers * - * Description: - * Initialize all the Level One Phy registers + * Description: initializes all the Level One Phy registers * * Note: * @@ -1337,14 +2580,14 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ -SK_BOOL DoLoop) /* Should a Phy LOOback be set-up? */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ { SK_GEPORT *pPrt; SK_U16 Ctrl1; SK_U16 Ctrl2; SK_U16 Ctrl3; - Ctrl1 = PHY_L_CT_SP1000; + Ctrl1 = PHY_CT_SP1000; Ctrl2 = 0; Ctrl3 = PHY_SEL_TYPE; @@ -1353,39 +2596,36 @@ /* manually Master/Slave ? */ if (pPrt->PMSMode != SK_MS_MODE_AUTO) { Ctrl2 |= PHY_L_1000C_MSE; + if (pPrt->PMSMode == SK_MS_MODE_MASTER) { Ctrl2 |= PHY_L_1000C_MSC; } } - /* Autonegotiation ? */ - if (pPrt->PLinkMode == SK_LMODE_HALF || - pPrt->PLinkMode == SK_LMODE_FULL) { + /* Auto-negotiation ? */ + if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) { /* * level one spec say: "1000Mbps: manual mode not allowed" * but lets see what happens... */ - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - 0, "Level One PHY only works with Autoneg"); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("InitPhyLone: no autonegotiation Port %d\n", Port)); - /* No Autonegiotiation */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyLone: no auto-negotiation Port %d\n", Port)); /* Set DuplexMode in Config register */ Ctrl1 = (pPrt->PLinkMode == SK_LMODE_FULL ? PHY_CT_DUP_MD : 0); - /* Determine Master/Slave manually if not already done. */ + /* Determine Master/Slave manually if not already done */ if (pPrt->PMSMode == SK_MS_MODE_AUTO) { Ctrl2 |= PHY_L_1000C_MSE; /* set it to Slave */ } /* - * Do NOT enable Autonegotiation here. This would hold + * Do NOT enable Auto-negotiation here. 
This would hold * the link down because no IDLES are transmitted */ } else { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("InitPhyLone: with autonegotiation Port %d\n", Port)); - /* Set Autonegotiation advertisement */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("InitPhyLone: with auto-negotiation Port %d\n", Port)); + /* Set Auto-negotiation advertisement */ /* Set Full/half duplex capabilities */ switch (pPrt->PLinkMode) { @@ -1399,8 +2639,8 @@ Ctrl2 |= PHY_L_1000C_AFD | PHY_L_1000C_AHD; break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E015, SKERR_HWI_E015MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015, + SKERR_HWI_E015MSG); } switch (pPrt->PFlowCtrlMode) { @@ -1417,11 +2657,11 @@ Ctrl3 |= PHY_L_P_BOTH_MD; break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E016, SKERR_HWI_E016MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016, + SKERR_HWI_E016MSG); } - /* Restart Autonegotiation */ + /* Restart Auto-negotiation */ Ctrl1 = PHY_CT_ANE | PHY_CT_RE_CFG; } @@ -1431,14 +2671,14 @@ init order of LEDs and XMAC. (MAl) */ /* Write 1000Base-T Control Register */ - PHY_WRITE(IoC, pPrt, Port, PHY_LONE_1000T_CTRL, Ctrl2); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("1000Base-T Control Reg = %x\n", Ctrl2)); + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_1000T_CTRL, Ctrl2); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("1000B-T Ctrl Reg=0x%04X\n", Ctrl2)); /* Write AutoNeg Advertisement Register */ - PHY_WRITE(IoC, pPrt, Port, PHY_LONE_AUNE_ADV, Ctrl3); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNeg Advertisment Reg = %x\n", Ctrl3)); + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_AUNE_ADV, Ctrl3); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Auto-Neg. Adv. Reg=0x%04X\n", Ctrl3)); if (DoLoop) { @@ -1446,26 +2686,18 @@ Ctrl1 |= PHY_CT_LOOP; } - if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) { - /* - * nothing to do for Level one. - * PHY supports frames up to 10k. - */ - } - /* Write to the Phy control register */ - PHY_WRITE(IoC, pPrt, Port, PHY_LONE_CTRL, Ctrl1); - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("PHY Control Reg = %x\n", Ctrl1)); -} /* SkXmInitPhyLone*/ + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_CTRL, Ctrl1); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Control Reg=0x%04X\n", Ctrl1)); +} /* SkXmInitPhyLone */ /****************************************************************************** * * SkXmInitPhyNat() - Initialize the National Phy registers * - * Description: - * Initialize all the National Phy registers + * Description: initializes all the National Phy registers * * Note: * @@ -1476,103 +2708,96 @@ SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ -SK_BOOL DoLoop) /* Should a Phy LOOback be set-up? */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ { /* todo: National */ -} /* SkXmInitPhyNat*/ +} /* SkXmInitPhyNat */ +#endif /* OTHER_PHY */ /****************************************************************************** * - * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do autoneg + * SkMacInitPhy() - Initialize the PHY registers * - * This function analyses the Interrupt status word. If any of the - * Autonegotiating interrupt bits are set, the PLipaAutoNeg variable - * is set true. 
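/*
 * Illustrative sketch, not part of the patch: the Level One init code above
 * builds the 1000Base-T control word and the auto-negotiation advertisement
 * word from the configured duplex and flow-control modes before writing them
 * to the PHY.  The standalone helper below shows the same idea in isolation;
 * the ADV_* bit values and the enum names are invented for the example and do
 * not match any particular PHY's register layout.
 */
#include <stdint.h>

#define ADV_1000_FD	0x0200	/* advertise 1000Base-T full duplex (example value) */
#define ADV_1000_HD	0x0100	/* advertise 1000Base-T half duplex (example value) */
#define ADV_PAUSE_CAP	0x0400	/* symmetric PAUSE capable (example value) */
#define ADV_PAUSE_ASM	0x0800	/* asymmetric PAUSE direction (example value) */

enum duplex_mode { DUPLEX_HALF, DUPLEX_FULL, DUPLEX_BOTH };
enum flow_mode   { FLOW_NONE, FLOW_LOC_SEND, FLOW_SYMMETRIC, FLOW_SYM_OR_REM };

static uint16_t build_autoneg_adv(enum duplex_mode dm, enum flow_mode fm)
{
	uint16_t adv = 0;

	switch (dm) {
	case DUPLEX_FULL:
		adv |= ADV_1000_FD;
		break;
	case DUPLEX_HALF:
		adv |= ADV_1000_HD;
		break;
	case DUPLEX_BOTH:
		adv |= ADV_1000_FD | ADV_1000_HD;
		break;
	}

	switch (fm) {
	case FLOW_NONE:
		break;
	case FLOW_LOC_SEND:
		/* we want to send PAUSE frames but not honour received ones */
		adv |= ADV_PAUSE_ASM;
		break;
	case FLOW_SYMMETRIC:
		adv |= ADV_PAUSE_CAP;
		break;
	case FLOW_SYM_OR_REM:
		/* symmetric, or "remote sends only": advertise both bits */
		adv |= ADV_PAUSE_CAP | ADV_PAUSE_ASM;
		break;
	}

	return adv;
}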
- */ -void SkXmAutoNegLipaXmac( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus) /* Interrupt Status word to analyse */ -{ - SK_GEPORT *pPrt; - - pPrt = &pAC->GIni.GP[Port]; - - if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && - (IStatus & (XM_IS_LIPA_RC|XM_IS_RX_PAGE|XM_IS_AND))) { - - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNegLipa: AutoNeg detected on port %d %x\n", Port, IStatus)); - pPrt->PLipaAutoNeg = SK_LIPA_AUTO; - } -} /* SkXmAutoNegLipaXmac*/ - - -/****************************************************************************** + * Description: calls the Init PHY routines dep. on board type * - * SkXmAutoNegLipaBcom() - Decides whether Link Partner could do autoneg + * Note: * - * This function analyses the PHY status word. If any of the - * Autonegotiating bits are set, The PLipaAutoNeg variable - * is set true. + * Returns: + * nothing */ -void SkXmAutoNegLipaBcom( +void SkMacInitPhy( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ -SK_U16 PhyStat) /* PHY Status word to analyse */ +SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */ { SK_GEPORT *pPrt; pPrt = &pAC->GIni.GP[Port]; - if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && (PhyStat & PHY_ST_AN_OVER)) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNegLipa: AutoNeg detected on port %d %x\n", Port, PhyStat)); - pPrt->PLipaAutoNeg = SK_LIPA_AUTO; + switch (pPrt->PhyType) { + case SK_PHY_XMAC: + SkXmInitPhyXmac(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_BCOM: + SkXmInitPhyBcom(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_MARV_COPPER: + case SK_PHY_MARV_FIBER: + SkGmInitPhyMarv(pAC, IoC, Port, DoLoop); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmInitPhyLone(pAC, IoC, Port, DoLoop); + break; + case SK_PHY_NAT: + SkXmInitPhyNat(pAC, IoC, Port, DoLoop); + break; +#endif /* OTHER_PHY */ } -} /* SkXmAutoNegLipaBcom*/ +} /* SkMacInitPhy */ +#ifndef SK_DIAG /****************************************************************************** * - * SkXmAutoNegLipaLone() - Decides whether Link Partner could do autoneg + * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do auto-neg * - * This function analyses the PHY status word. If any of the - * Autonegotiating bits are set, The PLipaAutoNeg variable + * This function analyses the Interrupt status word. If any of the + * Auto-negotiating interrupt bits are set, the PLipaAutoNeg variable * is set true. 
*/ -void SkXmAutoNegLipaLone( +void SkXmAutoNegLipaXmac( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ -SK_U16 PhyStat) /* PHY Status word to analyse */ +int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus) /* Interrupt Status word to analyse */ { SK_GEPORT *pPrt; pPrt = &pAC->GIni.GP[Port]; if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && - (PhyStat & (PHY_ST_AN_OVER))) { + (IStatus & (XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND)) != 0) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNegLipa: AutoNeg detected on port %d %x\n", Port, PhyStat)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegLipa: AutoNeg detected on Port %d, IStatus=0x%04x\n", + Port, IStatus)); pPrt->PLipaAutoNeg = SK_LIPA_AUTO; } -} /* SkXmAutoNegLipaLone*/ +} /* SkXmAutoNegLipaXmac */ /****************************************************************************** * - * SkXmAutoNegLipaNat() - Decides whether Link Partner could do autoneg + * SkMacAutoNegLipaPhy() - Decides whether Link Partner could do auto-neg * - * This function analyses the PHY status word. If any of the - * Autonegotiating bits are set, The PLipaAutoNeg variable + * This function analyses the PHY status word. + * If any of the Auto-negotiating bits are set, the PLipaAutoNeg variable * is set true. */ -void SkXmAutoNegLipaNat( +void SkMacAutoNegLipaPhy( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port, /* Port Index (MAC_1 + n) */ @@ -1583,67 +2808,30 @@ pPrt = &pAC->GIni.GP[Port]; if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO && - (PhyStat & (PHY_ST_AN_OVER))) { + (PhyStat & PHY_ST_AN_OVER) != 0) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNegLipa: AutoNeg detected on port %d %x\n", Port, PhyStat)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegLipa: AutoNeg detected on Port %d, PhyStat=0x%04x\n", + Port, PhyStat)); pPrt->PLipaAutoNeg = SK_LIPA_AUTO; } -} /* SkXmAutoNegLipaNat*/ - - -/****************************************************************************** - * - * SkXmAutoNegDone() - Auto negotiation handling - * - * Description: - * This function handles the autonegotiation if the Done bit is set. - * - * Note: - * o The XMACs interrupt source register is NOT read here. - * o This function is public because it is used in the diagnostics - * tool, too. - * - * Returns: - * SK_AND_OK o.k. - * SK_AND_DUP_CAP Duplex capability error happened - * SK_AND_OTHER Other error happened - */ -int SkXmAutoNegDone( -SK_AC *pAC, /* adapter context */ -SK_IOC IoC, /* IO context */ -int Port) /* Port Index (MAC_1 + n) */ -{ - switch (pAC->GIni.GP[Port].PhyType) { - case SK_PHY_XMAC: - return (SkXmAutoNegDoneXmac(pAC, IoC, Port)); - case SK_PHY_BCOM: - return (SkXmAutoNegDoneBcom(pAC, IoC, Port)); - case SK_PHY_LONE: - return (SkXmAutoNegDoneLone(pAC, IoC, Port)); - case SK_PHY_NAT: - return (SkXmAutoNegDoneNat(pAC, IoC, Port)); - } - return (SK_AND_OTHER); -} /* SkXmAutoNegDone*/ +} /* SkMacAutoNegLipaPhy */ +#endif /* SK_DIAG */ /****************************************************************************** * - * SkXmAutoNegDoneXmac() - Auto negotiation handling + * SkXmAutoNegDoneXmac() - Auto-negotiation handling * * Description: - * This function handles the autonegotiation if the Done bit is set. - * - * Note: - * o The XMACs interrupt source register is NOT read here. + * This function handles the auto-negotiation if the Done bit is set. * * Returns: * SK_AND_OK o.k. 
* SK_AND_DUP_CAP Duplex capability error happened * SK_AND_OTHER Other error happened */ -static int SkXmAutoNegDoneXmac( +static int SkXmAutoNegDoneXmac( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ @@ -1652,22 +2840,22 @@ SK_U16 ResAb; /* Resolved Ability */ SK_U16 LPAb; /* Link Partner Ability */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, ("AutoNegDoneXmac" - "Port %d\n",Port)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneXmac, Port %d\n",Port)); pPrt = &pAC->GIni.GP[Port]; /* Get PHY parameters */ - PHY_READ(IoC, pPrt, Port, PHY_XMAC_AUNE_LP, &LPAb); - PHY_READ(IoC, pPrt, Port, PHY_XMAC_RES_ABI, &ResAb); + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LPAb); + SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb); - if (LPAb & PHY_X_AN_RFB) { + if ((LPAb & PHY_X_AN_RFB) != 0) { /* At least one of the remote fault bit is set */ /* Error */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNegFail: Remote fault bit set Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; - return (SK_AND_OTHER); + return(SK_AND_OTHER); } /* Check Duplex mismatch */ @@ -1679,10 +2867,10 @@ } else { /* Error */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNegFail: Duplex mode mismatch port %d\n", Port)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Duplex mode mismatch Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; - return (SK_AND_DUP_CAP); + return(SK_AND_DUP_CAP); } /* Check PAUSE mismatch */ @@ -1690,7 +2878,7 @@ /* We are using IEEE 802.3z/D5.0 Table 37-4 */ if ((pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC || pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) && - (LPAb & PHY_X_P_SYM_MD)) { + (LPAb & PHY_X_P_SYM_MD) != 0) { /* Symmetric PAUSE */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; } @@ -1708,31 +2896,25 @@ /* PAUSE mismatch -> no PAUSE */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; } + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_1000MBPS; - /* We checked everything and may now enable the link */ - pPrt->PAutoNegFail = SK_FALSE; - - SkXmRxTxEnable(pAC, IoC, Port); - return (SK_AND_OK); -} /* SkXmAutoNegDoneXmac*/ + return(SK_AND_OK); +} /* SkXmAutoNegDoneXmac */ /****************************************************************************** * - * SkXmAutoNegDoneBcom() - Auto negotiation handling + * SkXmAutoNegDoneBcom() - Auto-negotiation handling * * Description: - * This function handles the autonegotiation if the Done bit is set. - * - * Note: - * o The XMACs interrupt source register is NOT read here. + * This function handles the auto-negotiation if the Done bit is set. * * Returns: * SK_AND_OK o.k. * SK_AND_DUP_CAP Duplex capability error happened * SK_AND_OTHER Other error happened */ -static int SkXmAutoNegDoneBcom( +static int SkXmAutoNegDoneBcom( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ @@ -1746,105 +2928,199 @@ SK_U16 ResAb; /* Resolved Ability */ #endif /* 0 */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNegDoneBcom, Port %d\n", Port)); pPrt = &pAC->GIni.GP[Port]; - /* Get PHY parameters. 
*/ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUNE_LP, &LPAb); + /* Get PHY parameters */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LPAb); #if 0 01-Sep-2000 RA;:;: - PHY_READ(IoC, pPrt, Port, PHY_BCOM_1000T_STAT, &ResAb); + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb); #endif /* 0 */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUX_STAT, &AuxStat); + + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &AuxStat); - if (LPAb & PHY_B_AN_RF) { - /* Remote fault bit is set: Error. */ + if ((LPAb & PHY_B_AN_RF) != 0) { + /* Remote fault bit is set: Error */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNegFail: Remote fault bit set Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; - return (SK_AND_OTHER); + return(SK_AND_OTHER); } - /* Check Duplex mismatch. */ - if ((AuxStat & PHY_B_AS_AN_RES) == PHY_B_RES_1000FD) { + /* Check Duplex mismatch */ + if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000FD) { pPrt->PLinkModeStatus = SK_LMODE_STAT_AUTOFULL; } - else if ((AuxStat & PHY_B_AS_AN_RES) == PHY_B_RES_1000HD) { + else if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000HD) { pPrt->PLinkModeStatus = SK_LMODE_STAT_AUTOHALF; } else { - /* Error. */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("AutoNegFail: Duplex mode mismatch port %d\n", Port)); + /* Error */ + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Duplex mode mismatch Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; - return (SK_AND_DUP_CAP); + return(SK_AND_DUP_CAP); } #if 0 01-Sep-2000 RA;:;: - /* Check Master/Slave resolution. */ - if (ResAb & PHY_B_1000S_MSF) { - /* Error. */ + /* Check Master/Slave resolution */ + if ((ResAb & PHY_B_1000S_MSF) != 0) { SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, - ("Master/Slave Fault port %d\n", Port)); + ("Master/Slave Fault Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; - return (SK_AND_OTHER); + return(SK_AND_OTHER); } - else if (ResAb & PHY_B_1000S_MSR) { - pPrt->PMSStatus = SK_MS_STAT_MASTER; + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE; +#endif /* 0 */ + + /* Check PAUSE mismatch */ + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PAUSE_MSK) { + /* Symmetric PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; + } + else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRR) { + /* Enable PAUSE receive, disable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; + } + else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRT) { + /* Disable PAUSE receive, enable PAUSE transmit */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND; } else { - pPrt->PMSStatus = SK_MS_STAT_SLAVE; + /* PAUSE mismatch -> no PAUSE */ + pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; + } + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_1000MBPS; + + return(SK_AND_OK); +} /* SkXmAutoNegDoneBcom */ + + +/****************************************************************************** + * + * SkGmAutoNegDoneMarv() - Auto-negotiation handling + * + * Description: + * This function handles the auto-negotiation if the Done bit is set. + * + * Returns: + * SK_AND_OK o.k. 
+ * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +static int SkGmAutoNegDoneMarv( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 LPAb; /* Link Partner Ability */ + SK_U16 ResAb; /* Resolved Ability */ + SK_U16 AuxStat; /* Auxiliary Status */ + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneMarv, Port %d\n", Port)); + pPrt = &pAC->GIni.GP[Port]; + + /* Get PHY parameters */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_LP, &LPAb); + + if ((LPAb & PHY_B_AN_RF) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Remote fault bit set Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + return(SK_AND_OTHER); + } + + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb); + + /* Check Master/Slave resolution */ + if ((ResAb & PHY_B_1000S_MSF) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PMSStatus = SK_MS_STAT_FAULT; + return(SK_AND_OTHER); + } + + pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ? + (SK_U8)SK_MS_STAT_MASTER : (SK_U8)SK_MS_STAT_SLAVE; + + /* Read PHY Specific Status */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &AuxStat); + + /* Check Speed & Duplex resolved */ + if ((AuxStat & PHY_M_PS_SPDUP_RES) == 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegFail: Speed & Duplex not resolved Port %d\n", Port)); + pPrt->PAutoNegFail = SK_TRUE; + pPrt->PLinkModeStatus = SK_LMODE_STAT_UNKNOWN; + return(SK_AND_DUP_CAP); + } + + if ((AuxStat & PHY_M_PS_FULL_DUP) != 0) { + pPrt->PLinkModeStatus = SK_LMODE_STAT_AUTOFULL; + } + else { + pPrt->PLinkModeStatus = SK_LMODE_STAT_AUTOHALF; } -#endif /* 0 */ - - /* Check PAUSE mismatch. */ - /* We are NOT using chapter 4.23 of the Xaqti manual. */ - /* We are using IEEE 802.3z/D5.0 Table 37-4. */ - if ((AuxStat & (PHY_B_AS_PRR | PHY_B_AS_PRT)) == - (PHY_B_AS_PRR | PHY_B_AS_PRT)) { - /* Symmetric PAUSE. */ + + /* Check PAUSE mismatch */ + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_PAUSE_MSK) { + /* Symmetric PAUSE */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; } - else if ((AuxStat & (PHY_B_AS_PRR | PHY_B_AS_PRT)) == PHY_B_AS_PRR) { - /* Enable PAUSE receive, disable PAUSE transmit. */ + else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_RX_P_EN) { + /* Enable PAUSE receive, disable PAUSE transmit */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; } - else if ((AuxStat & (PHY_B_AS_PRR | PHY_B_AS_PRT)) == PHY_B_AS_PRT) { - /* Disable PAUSE receive, enable PAUSE transmit. */ + else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_TX_P_EN) { + /* Disable PAUSE receive, enable PAUSE transmit */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND; } else { - /* PAUSE mismatch -> no PAUSE. */ + /* PAUSE mismatch -> no PAUSE */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; } + + /* set used link speed */ + switch ((unsigned)(AuxStat & PHY_M_PS_SPEED_MSK)) { + case (unsigned)PHY_M_PS_SPEED_1000: + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_1000MBPS; + break; + case PHY_M_PS_SPEED_100: + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_100MBPS; + break; + default: + pPrt->PLinkSpeedUsed = SK_LSPEED_STAT_10MBPS; + } - /* We checked everything and may now enable the link. 
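/*
 * Illustrative sketch, not part of the patch: the AutoNegDone routines above
 * all resolve the PAUSE usage according to IEEE 802.3z/D5.0 Table 37-4.  The
 * XMAC variant resolves it from the raw advertisements, while the Broadcom
 * and Marvell variants read an already-resolved status register.  A
 * standalone version of the Table 37-4 resolution, working directly on the
 * advertised bits of both link partners, looks roughly like this (bit values
 * and names are invented for the example):
 */
#include <stdint.h>

#define ADV_PAUSE_CAP	0x0400	/* symmetric PAUSE capable (example value) */
#define ADV_PAUSE_ASM	0x0800	/* asymmetric PAUSE direction (example value) */

enum flow_stat { FLOW_STAT_NONE, FLOW_STAT_SYMMETRIC,
		 FLOW_STAT_LOC_SEND, FLOW_STAT_REM_SEND };

static enum flow_stat resolve_pause(uint16_t loc_adv, uint16_t lp_adv)
{
	if ((loc_adv & lp_adv & ADV_PAUSE_CAP) != 0)
		return FLOW_STAT_SYMMETRIC;	/* both ends PAUSE capable */

	if ((loc_adv & lp_adv & ADV_PAUSE_ASM) != 0) {
		if ((loc_adv & ADV_PAUSE_CAP) != 0)
			return FLOW_STAT_REM_SEND;	/* partner sends, we honour */
		if ((lp_adv & ADV_PAUSE_CAP) != 0)
			return FLOW_STAT_LOC_SEND;	/* we send, partner honours */
	}

	return FLOW_STAT_NONE;			/* mismatch -> no PAUSE */
}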
*/ - pPrt->PAutoNegFail = SK_FALSE; - - SkXmRxTxEnable(pAC, IoC, Port); - return (SK_AND_OK); -} /* SkXmAutoNegDoneBcom*/ + return(SK_AND_OK); +} /* SkGmAutoNegDoneMarv */ +#ifdef OTHER_PHY /****************************************************************************** * - * SkXmAutoNegDoneLone() - Auto negotiation handling + * SkXmAutoNegDoneLone() - Auto-negotiation handling * * Description: - * This function handles the autonegotiation if the Done bit is set. - * - * Note: - * o The XMACs interrupt source register is NOT read here. + * This function handles the auto-negotiation if the Done bit is set. * * Returns: * SK_AND_OK o.k. * SK_AND_DUP_CAP Duplex capability error happened * SK_AND_OTHER Other error happened */ -static int SkXmAutoNegDoneLone( +static int SkXmAutoNegDoneLone( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ @@ -1854,26 +3130,26 @@ SK_U16 LPAb; /* Link Partner Ability */ SK_U16 QuickStat; /* Auxiliary Status */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, ("AutoNegDoneLone" - "Port %d\n",Port)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("AutoNegDoneLone, Port %d\n",Port)); pPrt = &pAC->GIni.GP[Port]; /* Get PHY parameters */ - PHY_READ(IoC, pPrt, Port, PHY_LONE_AUNE_LP, &LPAb); - PHY_READ(IoC, pPrt, Port, PHY_LONE_1000T_STAT, &ResAb); - PHY_READ(IoC, pPrt, Port, PHY_LONE_Q_STAT, &QuickStat); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LPAb); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ResAb); + SkXmPhyRead(pAC, IoC, Port, PHY_LONE_Q_STAT, &QuickStat); - if (LPAb & PHY_L_AN_RF) { + if ((LPAb & PHY_L_AN_RF) != 0) { /* Remote fault bit is set */ /* Error */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, ("AutoNegFail: Remote fault bit set Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; - return (SK_AND_OTHER); + return(SK_AND_OTHER); } /* Check Duplex mismatch */ - if (QuickStat & PHY_L_QS_DUP_MOD) { + if ((QuickStat & PHY_L_QS_DUP_MOD) != 0) { pPrt->PLinkModeStatus = SK_LMODE_STAT_AUTOFULL; } else { @@ -1881,13 +3157,13 @@ } /* Check Master/Slave resolution */ - if (ResAb & (PHY_L_1000S_MSF)) { + if ((ResAb & PHY_L_1000S_MSF) != 0) { /* Error */ - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_CTRL, - ("Master/Slave Fault port %d\n", Port)); + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("Master/Slave Fault Port %d\n", Port)); pPrt->PAutoNegFail = SK_TRUE; pPrt->PMSStatus = SK_MS_STAT_FAULT; - return (SK_AND_OTHER); + return(SK_AND_OTHER); } else if (ResAb & PHY_L_1000S_MSR) { pPrt->PMSStatus = SK_MS_STAT_MASTER; @@ -1897,7 +3173,6 @@ } /* Check PAUSE mismatch */ - /* We are NOT using chapter 4.23 of the Xaqti manual */ /* We are using IEEE 802.3z/D5.0 Table 37-4 */ /* we must manually resolve the abilities here */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE; @@ -1913,7 +3188,7 @@ } break; case SK_FLOW_MODE_SYMMETRIC: - if ((QuickStat & PHY_L_QS_PAUSE) == PHY_L_QS_PAUSE) { + if ((QuickStat & PHY_L_QS_PAUSE) != 0) { /* Symmetric PAUSE */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; } @@ -1924,154 +3199,545 @@ /* Enable PAUSE receive, disable PAUSE transmit */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND; } - else if ((QuickStat & PHY_L_QS_PAUSE) == PHY_L_QS_PAUSE) { + else if ((QuickStat & PHY_L_QS_PAUSE) != 0) { /* Symmetric PAUSE */ pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC; } break; default: - SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, - SKERR_HWI_E016, SKERR_HWI_E016MSG); + SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, 
SKERR_HWI_E016, + SKERR_HWI_E016MSG); } - - /* We checked everything and may now enable the link */ - pPrt->PAutoNegFail = SK_FALSE; - - SkXmRxTxEnable(pAC, IoC, Port); - return (SK_AND_OK); + + return(SK_AND_OK); } /* SkXmAutoNegDoneLone */ /****************************************************************************** * - * SkXmAutoNegDoneNat() - Auto negotiation handling + * SkXmAutoNegDoneNat() - Auto-negotiation handling * * Description: - * This function handles the autonegotiation if the Done bit is set. - * - * Note: - * o The XMACs interrupt source register is NOT read here. - * o This function is public because it is used in the diagnostics - * tool, too. + * This function handles the auto-negotiation if the Done bit is set. * * Returns: * SK_AND_OK o.k. * SK_AND_DUP_CAP Duplex capability error happened * SK_AND_OTHER Other error happened */ -static int SkXmAutoNegDoneNat( +static int SkXmAutoNegDoneNat( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { /* todo: National */ - return (SK_AND_OK); -} /* SkXmAutoNegDoneNat*/ + return(SK_AND_OK); +} /* SkXmAutoNegDoneNat */ +#endif /* OTHER_PHY */ + + +/****************************************************************************** + * + * SkMacAutoNegDone() - Auto-negotiation handling + * + * Description: calls the auto-negotiation done routines dep. on board type + * + * Returns: + * SK_AND_OK o.k. + * SK_AND_DUP_CAP Duplex capability error happened + * SK_AND_OTHER Other error happened + */ +int SkMacAutoNegDone( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + int Rtv; + + pPrt = &pAC->GIni.GP[Port]; + + switch (pPrt->PhyType) { + case SK_PHY_XMAC: + Rtv = SkXmAutoNegDoneXmac(pAC, IoC, Port); + break; + case SK_PHY_BCOM: + Rtv = SkXmAutoNegDoneBcom(pAC, IoC, Port); + break; + case SK_PHY_MARV_COPPER: + case SK_PHY_MARV_FIBER: + Rtv = SkGmAutoNegDoneMarv(pAC, IoC, Port); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + Rtv = SkXmAutoNegDoneLone(pAC, IoC, Port); + break; + case SK_PHY_NAT: + Rtv = SkXmAutoNegDoneNat(pAC, IoC, Port); + break; +#endif /* OTHER_PHY */ + default: + return(SK_AND_OTHER); + } + + if (Rtv != SK_AND_OK) { + return(Rtv); + } + + /* We checked everything and may now enable the link */ + pPrt->PAutoNegFail = SK_FALSE; + + SkMacRxTxEnable(pAC, IoC, Port); + + return(SK_AND_OK); +} /* SkMacAutoNegDone */ /****************************************************************************** * - * SkXmRxTxEnable() - Enable RxTx activity if port is up + * SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC * * Description: + * sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg. + * enables Rx/Tx * - * Note: - * o The XMACs interrupt source register is NOT read here. 
+ * Returns: N/A + */ +static void SkXmSetRxTxEn( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Para) /* Parameter to set: MAC or PHY LoopBack, Duplex Mode */ +{ + SK_U16 Word; + + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + + switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) { + case SK_MAC_LOOPB_ON: + Word |= XM_MMU_MAC_LB; + break; + case SK_MAC_LOOPB_OFF: + Word &= ~XM_MMU_MAC_LB; + break; + } + + switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) { + case SK_PHY_LOOPB_ON: + Word |= XM_MMU_GMII_LOOP; + break; + case SK_PHY_LOOPB_OFF: + Word &= ~XM_MMU_GMII_LOOP; + break; + } + + switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) { + case SK_PHY_FULLD_ON: + Word |= XM_MMU_GMII_FD; + break; + case SK_PHY_FULLD_OFF: + Word &= ~XM_MMU_GMII_FD; + break; + } + + XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + + /* dummy read to ensure writing */ + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + +} /* SkXmSetRxTxEn */ + + +/****************************************************************************** + * + * SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC + * + * Description: + * sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg. + * enables Rx/Tx + * + * Returns: N/A + */ +static void SkGmSetRxTxEn( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Para) /* Parameter to set: MAC LoopBack, Duplex Mode */ +{ + SK_U16 Ctrl; + + GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); + + switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) { + case SK_MAC_LOOPB_ON: + Ctrl |= GM_GPCR_LOOP_ENA; + break; + case SK_MAC_LOOPB_OFF: + Ctrl &= ~GM_GPCR_LOOP_ENA; + break; + } + + switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) { + case SK_PHY_FULLD_ON: + Ctrl |= GM_GPCR_DUP_FULL; + break; + case SK_PHY_FULLD_OFF: + Ctrl &= ~GM_GPCR_DUP_FULL; + break; + } + + GM_OUT16(IoC, Port, GM_GP_CTRL, Ctrl | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + + /* dummy read to ensure writing */ + GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl); + +} /* SkGmSetRxTxEn */ + + +/****************************************************************************** + * + * SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters + * + * Description: calls the Special Set Rx/Tx Enable routines dep. on board type + * + * Returns: N/A + */ +void SkMacSetRxTxEn( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +int Para) +{ + if (pAC->GIni.GIGenesis) { + + SkXmSetRxTxEn(pAC, IoC, Port, Para); + } + else { + + SkGmSetRxTxEn(pAC, IoC, Port, Para); + } + +} /* SkMacSetRxTxEn */ + + +/****************************************************************************** + * + * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up + * + * Description: enables Rx/Tx dep. on board type * * Returns: * 0 o.k. 
* != 0 Error happened */ -int SkXmRxTxEnable( +int SkMacRxTxEnable( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ int Port) /* Port Index (MAC_1 + n) */ { SK_GEPORT *pPrt; - SK_U16 Reg; /* 16bit register value */ - SK_U16 IntMask; /* XMac interrupt mask */ + SK_U16 Reg; /* 16-bit register value */ + SK_U16 IntMask; /* MAC interrupt mask */ SK_U16 SWord; pPrt = &pAC->GIni.GP[Port]; if (!pPrt->PHWLinkUp) { /* The Hardware link is NOT up */ - return (0); + return(0); } if ((pPrt->PLinkMode == SK_LMODE_AUTOHALF || pPrt->PLinkMode == SK_LMODE_AUTOFULL || pPrt->PLinkMode == SK_LMODE_AUTOBOTH) && pPrt->PAutoNegFail) { - /* Autonegotiation is not done or failed */ - return (0); + /* Auto-negotiation is not done or failed */ + return(0); } - /* Set Dup Mode and Pause Mode */ - SkXmInitDupMd (pAC, IoC, Port); - SkXmInitPauseMd (pAC, IoC, Port); - - /* - * Initialize the Interrupt Mask Register. Default IRQs are... - * - Link Asynchronous Event - * - Link Partner requests config - * - Auto Negotiation Done - * - Rx Counter Event Overflow - * - Tx Counter Event Overflow - * - Transmit FIFO Underrun - */ - if (pPrt->PhyType == SK_PHY_XMAC) { + if (pAC->GIni.GIGenesis) { + /* set Duplex Mode and Pause Mode */ + SkXmInitDupMd(pAC, IoC, Port); + + SkXmInitPauseMd(pAC, IoC, Port); + + /* + * Initialize the Interrupt Mask Register. Default IRQs are... + * - Link Asynchronous Event + * - Link Partner requests config + * - Auto Negotiation Done + * - Rx Counter Event Overflow + * - Tx Counter Event Overflow + * - Transmit FIFO Underrun + */ IntMask = XM_DEF_MSK; + +#ifdef DEBUG + /* add IRQ for Receive FIFO Overflow */ + IntMask &= ~XM_IS_RXF_OV; +#endif /* DEBUG */ + + if (pPrt->PhyType != SK_PHY_XMAC) { + /* disable GP0 interrupt bit */ + IntMask |= XM_IS_INP_ASS; + } + XM_OUT16(IoC, Port, XM_IMSK, IntMask); + + /* get MMU Command Reg. */ + XM_IN16(IoC, Port, XM_MMU_CMD, &Reg); + + if (pPrt->PhyType != SK_PHY_XMAC && + (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL)) { + /* set to Full Duplex */ + Reg |= XM_MMU_GMII_FD; + } + + switch (pPrt->PhyType) { + case SK_PHY_BCOM: + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, + (SK_U16)(SWord & ~PHY_B_AC_DIS_PM)); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, PHY_L_DEF_MSK); + break; + case SK_PHY_NAT: + /* todo National: + SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, PHY_N_DEF_MSK); */ + /* no interrupts possible from National ??? */ + break; +#endif /* OTHER_PHY */ + } + + /* enable Rx/Tx */ + XM_OUT16(IoC, Port, XM_MMU_CMD, Reg | XM_MMU_ENA_RX | XM_MMU_ENA_TX); } else { - /* disable GP0 interrupt bit */ - IntMask = XM_DEF_MSK | XM_IS_INP_ASS; + /* + * Initialize the Interrupt Mask Register. Default IRQs are... 
+ * - Rx Counter Event Overflow + * - Tx Counter Event Overflow + * - Transmit FIFO Underrun + */ + IntMask = GMAC_DEF_MSK; + +#ifdef DEBUG + /* add IRQ for Receive FIFO Overrun */ + IntMask |= GM_IS_RX_FF_OR; +#endif /* DEBUG */ + + SK_OUT8(IoC, GMAC_IRQ_MSK, (SK_U8)IntMask); + + /* get General Purpose Control */ + GM_IN16(IoC, Port, GM_GP_CTRL, &Reg); + + if (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL || + pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) { + /* set to Full Duplex */ + Reg |= GM_GPCR_DUP_FULL; + } + + /* enable Rx/Tx */ + GM_OUT16(IoC, Port, GM_GP_CTRL, Reg | GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + +#ifndef VCPU + /* Enable all PHY interrupts */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); +#endif /* VCPU */ } - XM_OUT16(IoC, Port, XM_IMSK, IntMask); + + return(0); + +} /* SkMacRxTxEnable */ + + +/****************************************************************************** + * + * SkMacRxTxDisable() - Disable Receiver and Transmitter + * + * Description: disables Rx/Tx dep. on board type + * + * Returns: N/A + */ +void SkMacRxTxDisable( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 Word; - /* RX/TX enable */ - XM_IN16(IoC, Port, XM_MMU_CMD, &Reg); - if (pPrt->PhyType != SK_PHY_XMAC && - (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL || - pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL)) { - Reg |= XM_MMU_GMII_FD; + if (pAC->GIni.GIGenesis) { + + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); + + XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + /* dummy read to ensure writing */ + XM_IN16(IoC, Port, XM_MMU_CMD, &Word); } - switch (pPrt->PhyType) { - case SK_PHY_BCOM: - /* Workaround BCOM Errata (#10523) for all BCom Phys */ - /* Enable Power Management after link up */ - PHY_READ(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, &SWord); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_AUX_CTRL, SWord & ~PHY_B_AC_DIS_PM); - PHY_WRITE(IoC, pPrt, Port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); - break; - case SK_PHY_LONE: - PHY_WRITE(IoC, pPrt, Port, PHY_LONE_INT_ENAB, PHY_L_DEF_MSK); - break; - case SK_PHY_NAT: - /* todo National: - PHY_WRITE(IoC, pPrt, Port, PHY_NAT_INT_MASK, - PHY_N_DEF_MSK); */ - /* no interrupts possible from National ??? */ - break; + else { + + GM_IN16(IoC, Port, GM_GP_CTRL, &Word); + + GM_OUT16(IoC, Port, GM_GP_CTRL, Word & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)); + + /* dummy read to ensure writing */ + GM_IN16(IoC, Port, GM_GP_CTRL, &Word); + } +} /* SkMacRxTxDisable */ + + +/****************************************************************************** + * + * SkMacIrqDisable() - Disable IRQ from MAC + * + * Description: sets the IRQ-mask to disable IRQ dep. 
on board type + * + * Returns: N/A + */ +void SkMacIrqDisable( +SK_AC *pAC, /* Adapter Context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 Word; + + pPrt = &pAC->GIni.GP[Port]; + + if (pAC->GIni.GIGenesis) { + + /* disable all XMAC IRQs */ + XM_OUT16(IoC, Port, XM_IMSK, 0xffff); + + /* Disable all PHY interrupts */ + switch (pPrt->PhyType) { + case SK_PHY_BCOM: + /* Make sure that PHY is initialized */ + if (pPrt->PState != SK_PRT_RESET) { + /* NOT allowed if BCOM is in RESET state */ + /* Workaround BCOM Errata (#10523) all BCom */ + /* Disable Power Management if link is down */ + SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Word); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, + (SK_U16)(Word | PHY_B_AC_DIS_PM)); + SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff); + } + break; +#ifdef OTHER_PHY + case SK_PHY_LONE: + SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0); + break; + case SK_PHY_NAT: + /* todo: National + SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */ + break; +#endif /* OTHER_PHY */ + } + } + else { + /* disable all GMAC IRQs */ + SK_OUT8(IoC, GMAC_IRQ_MSK, 0); + +#ifndef VCPU + /* Disable all PHY interrupts */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0); +#endif /* VCPU */ + } +} /* SkMacIrqDisable */ + + +#ifdef SK_DIAG +/****************************************************************************** + * + * SkXmSendCont() - Enable / Disable Send Continuous Mode + * + * Description: enable / disable Send Continuous Mode on XMAC + * + * Returns: + * nothing + */ +void SkXmSendCont( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ + SK_U32 MdReg; + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + if (Enable) { + MdReg |= XM_MD_TX_CONT; + } + else { + MdReg &= ~XM_MD_TX_CONT; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + +} /* SkXmSendCont*/ + +/****************************************************************************** + * + * SkMacTimeStamp() - Enable / Disable Time Stamp + * + * Description: enable / disable Time Stamp generation for Rx packets + * + * Returns: + * nothing + */ +void SkMacTimeStamp( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL Enable) /* Enable / Disable */ +{ + SK_U32 MdReg; + SK_U8 TimeCtrl; + + if (pAC->GIni.GIGenesis) { + + XM_IN32(IoC, Port, XM_MODE, &MdReg); + + if (Enable) { + MdReg |= XM_MD_ATS; + } + else { + MdReg &= ~XM_MD_ATS; + } + /* setup Mode Register */ + XM_OUT32(IoC, Port, XM_MODE, MdReg); + } + else { + if (Enable) { + TimeCtrl = GMT_ST_START | GMT_ST_CLR_IRQ; + } + else { + TimeCtrl = GMT_ST_STOP | GMT_ST_CLR_IRQ; + } + /* Start/Stop Time Stamp Timer */ + SK_OUT8(pAC, GMAC_TI_ST_CTRL, TimeCtrl); } - XM_OUT16(IoC, Port, XM_MMU_CMD, Reg | XM_MMU_ENA_RX | XM_MMU_ENA_TX); - - return (0); -} /* SkXmRxTxEnable*/ +} /* SkMacTimeStamp*/ -#ifndef SK_DIAG +#else /* SK_DIAG */ /****************************************************************************** * - * SkXmIrq() - Interrupt service routine + * SkXmIrq() - Interrupt Service Routine * - * Description: - * Services an Interrupt of the XMAC II + * Description: services an Interrupt Request of the XMAC * * Note: - * The XMACs interrupt source register is NOT read here. 
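/*
 * Illustrative sketch, not part of the patch: the Rx/Tx enable and disable
 * helpers a little further up all finish a register update with a "dummy
 * read to ensure writing".  On a bus with posted writes, reading the
 * register back forces the preceding write out to the device before the
 * function returns.  A generic version of that read-modify-write pattern
 * (the helper name is invented for the example):
 */
#include <stdint.h>

/* set and clear bits in a 16-bit device register, then flush the write */
static void update_reg16(volatile uint16_t *reg, uint16_t set, uint16_t clear)
{
	uint16_t val;

	val = *reg;			/* read the current value */
	val &= (uint16_t)~clear;
	val |= set;
	*reg = val;			/* write it back */

	(void)*reg;			/* dummy read: flush the posted write */
}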
- * With an external PHY, some interrupt bits are not meaningfull - * any more: + * With an external PHY, some interrupt bits are not meaningfull any more: * - LinkAsyncEvent (bit #14) XM_IS_LNK_AE * - LinkPartnerReqConfig (bit #10) XM_IS_LIPA_RC * - Page Received (bit #9) XM_IS_RX_PAGE @@ -2086,124 +3752,589 @@ void SkXmIrq( SK_AC *pAC, /* adapter context */ SK_IOC IoC, /* IO context */ -int Port, /* Port Index (MAC_1 + n) */ -SK_U16 IStatus) /* Interrupt status read from the XMAC */ +int Port) /* Port Index (MAC_1 + n) */ { SK_GEPORT *pPrt; SK_EVPARA Para; + SK_U16 IStatus; /* Interrupt status read from the XMAC */ SK_U16 IStatus2; pPrt = &pAC->GIni.GP[Port]; - if (pPrt->PhyType != SK_PHY_XMAC) { + XM_IN16(IoC, Port, XM_ISRC, &IStatus); + + /* LinkPartner Auto-negable? */ + if (pPrt->PhyType == SK_PHY_XMAC) { + SkXmAutoNegLipaXmac(pAC, IoC, Port, IStatus); + } + else { /* mask bits that are not used with ext. PHY */ IStatus &= ~(XM_IS_LNK_AE | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_TX_PAGE | XM_IS_AND | XM_IS_INP_ASS); } - /* - * LinkPartner Autonegable? - */ - if (pPrt->PhyType == SK_PHY_XMAC) { - SkXmAutoNegLipaXmac(pAC, IoC, Port, IStatus); - } - SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("XmacIrq Port %d Isr %x\n", Port, IStatus)); + ("XmacIrq Port %d Isr 0x%04x\n", Port, IStatus)); if (!pPrt->PHWLinkUp) { /* Spurious XMAC interrupt */ SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("SkXmIrq: spurious interrupt on port %d\n", Port)); + ("SkXmIrq: spurious interrupt on Port %d\n", Port)); return; } - if (IStatus & XM_IS_INP_ASS) { + if ((IStatus & XM_IS_INP_ASS) != 0) { /* Reread ISR Register if link is not in sync */ XM_IN16(IoC, Port, XM_ISRC, &IStatus2); SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, - ("SkXmIrq: Link async. Double check port %d %x %x\n", + ("SkXmIrq: Link async. Double check Port %d 0x%04x 0x%04x\n", Port, IStatus, IStatus2)); IStatus &= ~XM_IS_INP_ASS; IStatus |= IStatus2; - } - if (IStatus & XM_IS_LNK_AE) { - /* not used GP0 is used instead */ + if ((IStatus & XM_IS_LNK_AE) != 0) { + /* not used, GP0 is used instead */ } - if (IStatus & XM_IS_TX_ABORT) { + if ((IStatus & XM_IS_TX_ABORT) != 0) { /* not used */ } - if (IStatus & XM_IS_FRC_INT) { - /* not used. 
use ASIC IRQ instead if needed */ + if ((IStatus & XM_IS_FRC_INT) != 0) { + /* not used, use ASIC IRQ instead if needed */ } - if (IStatus & (XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE)) { + if ((IStatus & (XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE)) != 0) { SkHWLinkDown(pAC, IoC, Port); /* Signal to RLMT */ - Para.Para32[0] = (SK_U32) Port; + Para.Para32[0] = (SK_U32)Port; SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para); /* Start workaround Errata #2 timer */ - SkTimerStart(pAC, IoC, &pAC->GIni.GP[Port].PWaTimer, - SK_WA_INA_TIME, SKGE_HWAC, SK_HWEV_WATIM, Para); + SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME, + SKGE_HWAC, SK_HWEV_WATIM, Para); } - if (IStatus & XM_IS_RX_PAGE) { + if ((IStatus & XM_IS_RX_PAGE) != 0) { /* not used */ } - if (IStatus & XM_IS_TX_PAGE) { + if ((IStatus & XM_IS_TX_PAGE) != 0) { /* not used */ } - if (IStatus & XM_IS_AND) { - SK_DBG_MSG(pAC,SK_DBGMOD_HWM,SK_DBGCAT_IRQ, - ("SkXmIrq: AND on link that is up port %d\n", Port)); + if ((IStatus & XM_IS_AND) != 0) { + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("SkXmIrq: AND on link that is up Port %d\n", Port)); } - if (IStatus & XM_IS_TSC_OV) { + if ((IStatus & XM_IS_TSC_OV) != 0) { /* not used */ } - if (IStatus & XM_IS_RXC_OV) { - Para.Para32[0] = (SK_U32) Port; - Para.Para32[1] = (SK_U32) IStatus; + /* Combined Tx & Rx Counter Overflow SIRQ Event */ + if ((IStatus & (XM_IS_RXC_OV | XM_IS_TXC_OV)) != 0) { + Para.Para32[0] = (SK_U32)Port; + Para.Para32[1] = (SK_U32)IStatus; SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para); } - if (IStatus & XM_IS_TXC_OV) { - Para.Para32[0] = (SK_U32) Port; - Para.Para32[1] = (SK_U32) IStatus; + if ((IStatus & XM_IS_RXF_OV) != 0) { + /* normal situation -> no effect */ + pPrt->PRxOverCnt++; + } + + if ((IStatus & XM_IS_TXF_UR) != 0) { + /* may NOT happen -> error log */ + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG); + } + + if ((IStatus & XM_IS_TX_COMP) != 0) { + /* not served here */ + } + + if ((IStatus & XM_IS_RX_COMP) != 0) { + /* not served here */ + } +} /* SkXmIrq */ + + +/****************************************************************************** + * + * SkGmIrq() - Interrupt Service Routine + * + * Description: services an Interrupt Request of the GMAC + * + * Note: + * + * Returns: + * nothing + */ +void SkGmIrq( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_EVPARA Para; + SK_U8 IStatus; /* Interrupt status */ + + pPrt = &pAC->GIni.GP[Port]; + + SK_IN8(IoC, GMAC_IRQ_SRC, &IStatus); + + /* LinkPartner Auto-negable? 
*/ + SkMacAutoNegLipaPhy(pAC, IoC, Port, IStatus); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ, + ("GmacIrq Port %d Isr 0x%04x\n", Port, IStatus)); + + /* Combined Tx & Rx Counter Overflow SIRQ Event */ + if (IStatus & (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV)) { + /* these IRQs will be cleared by reading GMACs register */ + Para.Para32[0] = (SK_U32)Port; + Para.Para32[1] = (SK_U32)IStatus; SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para); } - if (IStatus & XM_IS_RXF_OV) { - /* normal situation -> no effect */ + if (IStatus & GM_IS_RX_FF_OR) { + /* clear GMAC Rx FIFO Overrun IRQ */ + SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_CLI_RX_FO); + + pPrt->PRxOverCnt++; } - if (IStatus & XM_IS_TXF_UR) { + if (IStatus & GM_IS_TX_FF_UR) { + /* clear GMAC Tx FIFO Underrun IRQ */ + SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_CLI_TX_FU); /* may NOT happen -> error log */ - SK_ERR_LOG(pAC, SK_ERRCL_HW , SKERR_SIRQ_E020, - SKERR_SIRQ_E020MSG); + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG); } - if (IStatus & XM_IS_TX_COMP) { + if (IStatus & GM_IS_TX_COMPL) { /* not served here */ } - if (IStatus & XM_IS_RX_COMP) { + if (IStatus & GM_IS_RX_COMPL) { /* not served here */ } +} /* SkGmIrq */ + +/****************************************************************************** + * + * SkMacIrq() - Interrupt Service Routine for MAC + * + * Description: calls the Interrupt Service Routine dep. on board type + * + * Returns: + * nothing + */ +void SkMacIrq( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port) /* Port Index (MAC_1 + n) */ +{ -} /* SkXmIrq*/ + if (pAC->GIni.GIGenesis) { + /* IRQ from XMAC */ + SkXmIrq(pAC, IoC, Port); + } + else { + /* IRQ from GMAC */ + SkGmIrq(pAC, IoC, Port); + } +} /* SkMacIrq */ #endif /* !SK_DIAG */ +/****************************************************************************** + * + * SkXmUpdateStats() - Force the XMAC to output the current statistic + * + * Description: + * The XMAC holds its statistic internally. To obtain the current + * values a command must be sent so that the statistic data will + * be written to a predefined memory area on the adapter. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmUpdateStats( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + SK_GEPORT *pPrt; + SK_U16 StatReg; + int WaitIndex; + + pPrt = &pAC->GIni.GP[Port]; + WaitIndex = 0; + + /* Send an update command to XMAC specified */ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* + * It is an auto-clearing register. If the command bits + * went to zero again, the statistics are transferred. + * Normally the command should be executed immediately. + * But just to be sure we execute a loop. + */ + do { + + XM_IN16(IoC, Port, XM_STAT_CMD, &StatReg); + + if (++WaitIndex > 10) { + + SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E021, SKERR_HWI_E021MSG); + + return(1); + } + } while ((StatReg & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) != 0); + + return(0); +} /* SkXmUpdateStats */ + +/****************************************************************************** + * + * SkGmUpdateStats() - Force the GMAC to output the current statistic + * + * Description: + * Empty function for GMAC. Statistic data is accessible in direct way. 
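/*
 * Illustrative sketch, not part of the patch: SkXmUpdateStats() above issues
 * the statistics snapshot command and then polls the self-clearing command
 * bits with a small retry limit instead of waiting forever.  The same
 * bounded-polling pattern in isolation (register access and bit values are
 * invented for the example):
 */
#include <stdint.h>

#define CMD_SNAPSHOT	0x0003	/* example: self-clearing command bits */
#define MAX_POLLS	10

/* returns 0 on success, 1 if the command bits never cleared */
static int run_selfclearing_cmd(volatile uint16_t *cmd_reg)
{
	int tries = 0;

	*cmd_reg = CMD_SNAPSHOT;		/* start the command */

	while ((*cmd_reg & CMD_SNAPSHOT) != 0) {
		if (++tries > MAX_POLLS)
			return 1;		/* give up and report the error */
	}

	return 0;
}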
+ * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmUpdateStats( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + return(0); +} + +/****************************************************************************** + * + * SkXmMacStatistic() - Get XMAC counter value + * + * Description: + * Gets the 32bit counter value. Except for the octet counters + * the lower 32bit are counted in hardware and the upper 32bit + * must be counted in software by monitoring counter overflow interrupts. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmMacStatistic( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 StatAddr, /* MIB counter base address */ +SK_U32 *pVal) /* ptr to return statistic value */ +{ + if ((StatAddr < XM_TXF_OK) || (StatAddr > XM_RXF_MAX_SZ)) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG); + + return(1); + } + + XM_IN32(IoC, Port, StatAddr, pVal); + + return(0); +} /* SkXmMacStatistic */ + +/****************************************************************************** + * + * SkGmMacStatistic() - Get GMAC counter value + * + * Description: + * Gets the 32bit counter value. Except for the octet counters + * the lower 32bit are counted in hardware and the upper 32bit + * must be counted in software by monitoring counter overflow interrupts. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmMacStatistic( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 StatAddr, /* MIB counter base address */ +SK_U32 *pVal) /* ptr to return statistic value */ +{ + + if ((StatAddr < GM_RXF_UC_OK) || (StatAddr > GM_TXE_FIFO_UR)) { + + SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("SkGmMacStat: wrong MIB counter 0x%04X\n", StatAddr)); + return(1); + } + + GM_IN32(IoC, Port, StatAddr, pVal); + + return(0); +} /* SkGmMacStatistic */ + +/****************************************************************************** + * + * SkXmResetCounter() - Clear MAC statistic counter + * + * Description: + * Force the XMAC to clear its statistic counter. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmResetCounter( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + return(0); +} /* SkXmResetCounter */ + +/****************************************************************************** + * + * SkGmResetCounter() - Clear MAC statistic counter + * + * Description: + * Force GMAC to clear its statistic counter. 
+ * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmResetCounter( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port) /* Port Index (MAC_1 + n) */ +{ + SK_U16 Reg; /* Phy Address Register */ + SK_U16 Word; + int i; + + GM_IN16(IoC, Port, GM_PHY_ADDR, &Reg); + +#ifndef VCPU + /* set MIB Clear Counter Mode */ + GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg | GM_PAR_MIB_CLR); + + /* read all MIB Counters with Clear Mode set */ + for (i = 0; i < GM_MIB_CNT_SIZE; i++) { + /* the reset is performed only when the lower 16 bits are read */ + GM_IN16(IoC, Port, GM_MIB_CNT_BASE + 8*i, &Word); + } + + /* clear MIB Clear Counter Mode */ + GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg); +#endif /* !VCPU */ + + return(0); +} /* SkGmResetCounter */ + +/****************************************************************************** + * + * SkXmOverflowStatus() - Gets the status of counter overflow interrupt + * + * Description: + * Checks the source causing an counter overflow interrupt. On success the + * resulting counter overflow status is written to , whereas the + * upper dword stores the XMAC ReceiveCounterEvent register and the lower + * dword the XMAC TransmitCounterEvent register. + * + * Note: + * For XMAC the interrupt source is a self-clearing register, so the source + * must be checked only once. SIRQ module does another check to be sure + * that no interrupt get lost during process time. + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkXmOverflowStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U64 *pStatus) /* ptr for return overflow status value */ +{ + SK_U64 Status; /* Overflow status */ + SK_U32 RegVal; + + Status = 0; + + if ((IStatus & XM_IS_RXC_OV) != 0) { + + XM_IN32(IoC, Port, XM_RX_CNT_EV, &RegVal); + Status |= (SK_U64)RegVal << 32; + } + + if ((IStatus & XM_IS_TXC_OV) != 0) { + + XM_IN32(IoC, Port, XM_TX_CNT_EV, &RegVal); + Status |= (SK_U64)RegVal; + } + + *pStatus = Status; + + return(0); +} /* SkXmOverflowStatus */ + + +/****************************************************************************** + * + * SkGmOverflowStatus() - Gets the status of counter overflow interrupt + * + * Description: + * Checks the source causing an counter overflow interrupt. 
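/*
 * Illustrative sketch, not part of the patch: SkXmOverflowStatus() above
 * returns a single 64-bit status word with the Rx counter event register in
 * the upper 32 bits and the Tx counter event register in the lower 32 bits.
 * Packing and unpacking that layout looks like this (helper names are
 * invented for the example):
 */
#include <stdint.h>

static uint64_t pack_overflow_status(uint32_t rx_events, uint32_t tx_events)
{
	/* Rx events go into the upper dword, Tx events into the lower dword */
	return ((uint64_t)rx_events << 32) | (uint64_t)tx_events;
}

static uint32_t overflow_rx_events(uint64_t status)
{
	return (uint32_t)(status >> 32);
}

static uint32_t overflow_tx_events(uint64_t status)
{
	return (uint32_t)status;
}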
On success the + * resulting counter overflow status is written to , whereas the + * the following bit coding is used: + * 63:56 - unused + * 55:48 - TxRx interrupt register bit7:0 + * 32:47 - Rx interrupt register + * 31:24 - unused + * 23:16 - TxRx interrupt register bit15:8 + * 15:0 - Tx interrupt register + * + * Returns: + * 0: success + * 1: something went wrong + */ +int SkGmOverflowStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +unsigned int Port, /* Port Index (MAC_1 + n) */ +SK_U16 IStatus, /* Interupt Status from MAC */ +SK_U64 *pStatus) /* ptr for return overflow status value */ +{ + SK_U64 Status; /* Overflow status */ + SK_U16 RegVal; + + Status = 0; + + if ((IStatus & GM_IS_RX_CO_OV) != 0) { + /* this register is self-clearing after read */ + GM_IN16(IoC, Port, GM_RX_IRQ_SRC, &RegVal); + Status |= (SK_U64)RegVal << 32; + } + + if ((IStatus & GM_IS_TX_CO_OV) != 0) { + /* this register is self-clearing after read */ + GM_IN16(IoC, Port, GM_TX_IRQ_SRC, &RegVal); + Status |= (SK_U64)RegVal; + } + + /* this register is self-clearing after read */ + GM_IN16(IoC, Port, GM_TR_IRQ_SRC, &RegVal); + /* Rx overflow interrupt register bits (LoByte)*/ + Status |= (SK_U64)((SK_U8)RegVal) << 48; + /* Tx overflow interrupt register bits (HiByte)*/ + Status |= (SK_U64)(RegVal >> 8) << 16; + + *pStatus = Status; + + return(0); +} /* SkGmOverflowStatus */ + +/****************************************************************************** + * + * SkGmCableDiagStatus() - Starts / Gets status of cable diagnostic test + * + * Description: + * starts the cable diagnostic test if 'StartTest' is true + * gets the results if 'StartTest' is true + * + * NOTE: this test is meaningful only when link is down + * + * Returns: + * 0: success + * 1: no YUKON copper + * 2: test in progress + */ +int SkGmCableDiagStatus( +SK_AC *pAC, /* adapter context */ +SK_IOC IoC, /* IO context */ +int Port, /* Port Index (MAC_1 + n) */ +SK_BOOL StartTest) /* flag for start / get result */ +{ + int i; + SK_U16 RegVal; + SK_GEPORT *pPrt; + + pPrt = &pAC->GIni.GP[Port]; + + if (pPrt->PhyType != SK_PHY_MARV_COPPER) { + + return(1); + } + + if (StartTest) { + /* only start the cable test */ + if ((pPrt->PhyId1 & PHY_I1_REV_MSK) < 4) { + /* apply TDR workaround from Marvell */ + SkGmPhyWrite(pAC, IoC, Port, 29, 0x001e); + + SkGmPhyWrite(pAC, IoC, Port, 30, 0xcc00); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc800); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc400); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc000); + SkGmPhyWrite(pAC, IoC, Port, 30, 0xc100); + } + + /* set address to 0 for MDI[0] */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, 0); + + /* Read Cable Diagnostic Reg */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal); + + /* start Cable Diagnostic Test */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, + RegVal | PHY_M_CABD_ENA_TEST); + + return(0); + } + + /* Read Cable Diagnostic Reg */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal); + + SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL, + ("PHY Cable Diag.=0x%04X\n", RegVal)); + + if ((RegVal & PHY_M_CABD_ENA_TEST) != 0) { + /* test is running */ + return(2); + } + + /* get the test results */ + for (i = 0; i < 4; i++) { + /* set address to i for MDI[i] */ + SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, (SK_U16)i); + + /* get Cable Diagnostic values */ + SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal); + + pPrt->PMdiPairLen[i] = (SK_U8)(RegVal & PHY_M_CABD_DIST_MSK); + + pPrt->PMdiPairSts[i] = (SK_U8)((RegVal & 
PHY_M_CABD_STAT_MSK) >> 13); + } + + return(0); +} /* SkGmCableDiagStatus */ + /* End of file */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/smc9194.c linux.21pre4-ac2/drivers/net/smc9194.c --- linux.21pre4/drivers/net/smc9194.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/smc9194.c 2003-01-28 16:19:58.000000000 +0000 @@ -483,10 +483,20 @@ printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); return 1; } - lp->saved_skb = skb; - length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + length = skb->len; + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + { + netif_wake_queue(dev); + return 0; + } + length = ETH_ZLEN; + } + lp->saved_skb = skb; /* ** The MMU wants the number of pages to be the number of 256 bytes diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/starfire.c linux.21pre4-ac2/drivers/net/starfire.c --- linux.21pre4/drivers/net/starfire.c 2003-01-29 17:07:45.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/starfire.c 2003-02-04 15:02:26.000000000 +0000 @@ -101,15 +101,33 @@ - Better stats and error handling (Ion Badulescu) - Use new pci_set_mwi() PCI API function (jgarzik) -TODO: - - implement tx_timeout() properly + LK1.3.7 (Ion Badulescu) + - minimal implementation of tx_timeout() + - correctly shutdown the Rx/Tx engines in netdev_close() + - added calls to netif_carrier_on/off + (patch from Stefan Rompf ) - VLAN support + + LK1.3.8 (Ion Badulescu) + - adjust DMA burst size on sparc64 + - 64-bit support + - reworked zerocopy support for 64-bit buffers + - working and usable interrupt mitigation/latency + - reduced Tx interrupt frequency for lower interrupt overhead + + LK1.3.9 (Ion Badulescu) + - bugfix for mcast filter + - enable the right kind of Tx interrupts (TxDMADone, not TxDone) + +TODO: + - full NAPI support */ #define DRV_NAME "starfire" -#define DRV_VERSION "1.03+LK1.3.6" -#define DRV_RELDATE "March 7, 2002" +#define DRV_VERSION "1.03+LK1.3.9" +#define DRV_RELDATE "December 13, 2002" +#include #include #include #include @@ -118,7 +136,6 @@ #include #include #include -#include #include /* Processor type for cache alignment. */ #include #include @@ -154,11 +171,16 @@ #include "starfire_firmware.h" #endif /* HAS_FIRMWARE */ +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define VLAN_SUPPORT +#endif + /* The user-configurable values. These may be modified when a driver module is loaded.*/ /* Used for tuning interrupt latency vs. overhead. */ -static int interrupt_mitigation; +static int intr_latency; +static int small_frames; static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ static int max_interrupt_work = 20; @@ -166,6 +188,12 @@ /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). The Starfire has a 512 element hash table based on the Ethernet CRC. */ static int multicast_filter_limit = 512; +/* Whether to do TCP/UDP checksums in hardware */ +#ifdef HAS_FIRMWARE +static int enable_hw_cksum = 1; +#else +static int enable_hw_cksum = 0; +#endif #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* @@ -178,7 +206,9 @@ * packets as the starfire doesn't allow for misaligned DMAs ;-( * 23/10/2000 - Jes * - * The Alpha and the Sparc don't allow unaligned loads, either. -Ion + * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64, + * at least, having unaligned frames leads to a rather serious performance + * penalty. 
-Ion */ #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) static int rx_copybreak = PKT_BUF_SZ; @@ -186,9 +216,17 @@ static int rx_copybreak /* = 0 */; #endif +/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */ +#ifdef __sparc__ +#define DMA_BURST_SIZE 64 +#else +#define DMA_BURST_SIZE 128 +#endif + /* Used to pass the media type, etc. Both 'options[]' and 'full_duplex[]' exist for driver interoperability. The media type is usually passed in 'options[]'. + These variables are deprecated, use ethtool instead. -Ion */ #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS] = {0, }; @@ -198,33 +236,55 @@ /* The "native" ring sizes are either 256 or 2048. However in some modes a descriptor may be marked to wrap the ring earlier. - The driver allocates a single page for each descriptor ring, constraining - the maximum size in an architecture-dependent way. */ #define RX_RING_SIZE 256 #define TX_RING_SIZE 32 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */ #define DONE_Q_SIZE 1024 +/* All queues must be aligned on a 256-byte boundary */ +#define QUEUE_ALIGN 256 + +#if RX_RING_SIZE > 256 +#define RX_Q_ENTRIES Rx2048QEntries +#else +#define RX_Q_ENTRIES Rx256QEntries +#endif /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2 * HZ) -#ifdef ZEROCOPY -#if MAX_SKB_FRAGS <= 6 -#define MAX_STARFIRE_FRAGS 6 -#else /* MAX_STARFIRE_FRAGS > 6 */ -#warning This driver will not work with more than 6 skb fragments. -#warning Turning off zerocopy support. -#undef ZEROCOPY -#endif /* MAX_STARFIRE_FRAGS > 6 */ -#endif /* ZEROCOPY */ +/* + * This SUCKS. + * We need a much better method to determine if dma_addr_t is 64-bit. + */ +#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) +/* 64-bit dma_addr_t */ +#define ADDR_64BITS /* This chip uses 64 bit addresses. 
*/ +#define cpu_to_dma(x) cpu_to_le64(x) +#define dma_to_cpu(x) le64_to_cpu(x) +#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit +#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit +#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit +#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit +#define RX_DESC_ADDR_SIZE RxDescAddr64bit +#else /* 32-bit dma_addr_t */ +#define cpu_to_dma(x) cpu_to_le32(x) +#define dma_to_cpu(x) le32_to_cpu(x) +#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit +#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit +#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit +#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit +#define RX_DESC_ADDR_SIZE RxDescAddr32bit +#endif -#ifdef ZEROCOPY +#ifdef MAX_SKB_FRAGS #define skb_first_frag_len(skb) skb_headlen(skb) -#else /* not ZEROCOPY */ +#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) +#else /* not MAX_SKB_FRAGS */ #define skb_first_frag_len(skb) (skb->len) -#endif /* not ZEROCOPY */ +#define skb_num_frags(skb) 1 +#endif /* not MAX_SKB_FRAGS */ /* 2.2.x compatibility code */ #if LINUX_VERSION_CODE < 0x20300 @@ -233,9 +293,12 @@ #else /* LINUX_VERSION_CODE > 0x20300 */ +#include #include #include +#include + #define COMPAT_MOD_INC_USE_COUNT #define COMPAT_MOD_DEC_USE_COUNT @@ -266,15 +329,20 @@ MODULE_PARM(mtu, "i"); MODULE_PARM(debug, "i"); MODULE_PARM(rx_copybreak, "i"); -MODULE_PARM(interrupt_mitigation, "i"); +MODULE_PARM(intr_latency, "i"); +MODULE_PARM(small_frames, "i"); MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i"); MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i"); -MODULE_PARM_DESC(max_interrupt_work, "Starfire maximum events handled per interrupt"); -MODULE_PARM_DESC(mtu, "Starfire MTU (all boards)"); -MODULE_PARM_DESC(debug, "Starfire debug level (0-6)"); -MODULE_PARM_DESC(rx_copybreak, "Starfire copy breakpoint for copy-only-tiny-frames"); -MODULE_PARM_DESC(options, "Starfire: Bits 0-3: media type, bit 17: full duplex"); -MODULE_PARM_DESC(full_duplex, "Starfire full duplex setting(s) (1)"); +MODULE_PARM(enable_hw_cksum, "i"); +MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); +MODULE_PARM_DESC(mtu, "MTU (all boards)"); +MODULE_PARM_DESC(debug, "Debug level (0-6)"); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); +MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); +MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex"); +MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)"); +MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); /* Theory of Operation @@ -303,14 +371,14 @@ IIIb/c. Transmit/Receive Structure See the Adaptec manual for the many possible structures, and options for -each structure. There are far too many to document here. +each structure. There are far too many to document all of them here. For transmit this driver uses type 0/1 transmit descriptors (depending -on the presence of the zerocopy infrastructure), and relies on automatic +on the 32/64 bitness of the architecture), and relies on automatic minimum-length padding. It does not use the completion queue consumer index, but instead checks for non-zero status entries. -For receive this driver uses type 0 receive descriptors. The driver +For receive this driver uses type 0/1/2/3 receive descriptors. 
The driver allocates full frame size skbuffs for the Rx ring buffers, so all frames should fit in a single descriptor. The driver does not use the completion queue consumer index, but instead checks for non-zero status entries. @@ -335,15 +403,15 @@ dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and interrupt handling software. -The send packet thread has partial control over the Tx ring and 'dev->tbusy' -flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next -queue slot is empty, it clears the tbusy flag when finished otherwise it sets -the 'lp->tx_full' flag. +The send packet thread has partial control over the Tx ring and the netif_queue +status. If the number of free Tx slots in the ring falls below a certain number +(currently hardcoded to 4), it signals the upper layer to stop the queue. The interrupt handler has exclusive control over the Rx ring and records stats from the Tx ring. After reaping the stats, it marks the Tx queue entry as -empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it -clears both the tx_full and tbusy flags. +empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the +number of free Tx slots is above the threshold, it signals the upper layer to +restart the queue. IV. Notes @@ -355,18 +423,15 @@ IVc. Errata +- StopOnPerr is broken, don't enable +- Hardware ethernet padding exposes random data, perform software padding + instead (unverified -- works correctly for all the hardware I have) + */ enum chip_capability_flags {CanHaveMII=1, }; -#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0) - -#if 0 -#define ADDR_64BITS 1 /* This chip uses 64 bit addresses. */ -#endif - -#define HAS_IP_COPYSUM 1 enum chipset { CH_6915 = 0, @@ -398,7 +463,7 @@ enum register_offsets { PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074, IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088, - MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000, + MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000, GPIOCtrl=0x5008C, TxDescCtrl=0x50090, TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */ TxRingHiAddr=0x5009C, /* 64 bit address extension. */ @@ -409,11 +474,16 @@ CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0, RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0, RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4, - TxMode=0x55000, PerfFilterTable=0x56000, HashTable=0x56100, + TxMode=0x55000, VlanType=0x55064, + PerfFilterTable=0x56000, HashTable=0x56100, TxGfpMem=0x58000, RxGfpMem=0x5a000, }; -/* Bits in the interrupt status/mask registers. */ +/* + * Bits in the interrupt status/mask registers. + * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register + * enables all the interrupt sources that are or'ed into those status bits. + */ enum intr_status_bits { IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000, IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000, @@ -438,7 +508,16 @@ /* Bits in the RxFilterMode register. 
*/ enum rx_mode_bits { AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01, - AcceptMulticast=0x10, AcceptMyPhys=0xE040, + AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30, + PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200, + WakeupOnGFP=0x0800, +}; + +/* Bits in the TxMode register */ +enum tx_mode_bits { + MiiSoftReset=0x8000, MIILoopback=0x4000, + TxFlowEnable=0x0800, RxFlowEnable=0x0400, + PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01, }; /* Bits in the TxDescCtrl register. */ @@ -447,7 +526,8 @@ TxDescSpace128=0x30, TxDescSpace256=0x40, TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02, TxDescType3=0x03, TxDescType4=0x04, - TxNoDMACompletion=0x08, TxDescQ64bit=0x80, + TxNoDMACompletion=0x08, + TxDescQAddr64bit=0x80, TxDescQAddr32bit=0, TxHiPriFIFOThreshShift=24, TxPadLenShift=16, TxDMABurstSizeShift=8, }; @@ -455,81 +535,144 @@ /* Bits in the RxDescQCtrl register. */ enum rx_ctrl_bits { RxBufferLenShift=16, RxMinDescrThreshShift=0, - RxPrefetchMode=0x8000, Rx2048QEntries=0x4000, - RxVariableQ=0x2000, RxDesc64bit=0x1000, - RxDescQAddr64bit=0x0100, + RxPrefetchMode=0x8000, RxVariableQ=0x2000, + Rx2048QEntries=0x4000, Rx256QEntries=0, + RxDescAddr64bit=0x1000, RxDescAddr32bit=0, + RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0, RxDescSpace4=0x000, RxDescSpace8=0x100, RxDescSpace16=0x200, RxDescSpace32=0x300, RxDescSpace64=0x400, RxDescSpace128=0x500, RxConsumerWrEn=0x80, }; +/* Bits in the RxDMACtrl register. */ +enum rx_dmactrl_bits { + RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000, + RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000, + RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000, + RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000, + RxChecksumRejectTCPOnly=0x01000000, + RxCompletionQ2Enable=0x800000, + RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000, + RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000, + RxDMAQ2NonIP=0x400000, + RxUseBackupQueue=0x080000, RxDMACRC=0x040000, + RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8, + RxBurstSizeShift=0, +}; + /* Bits in the RxCompletionAddr register */ enum rx_compl_bits { - RxComplQAddr64bit=0x80, TxComplProducerWrEn=0x40, + RxComplQAddr64bit=0x80, RxComplQAddr32bit=0, + RxComplProducerWrEn=0x40, RxComplType0=0x00, RxComplType1=0x10, RxComplType2=0x20, RxComplType3=0x30, RxComplThreshShift=0, }; +/* Bits in the TxCompletionAddr register */ +enum tx_compl_bits { + TxComplQAddr64bit=0x80, TxComplQAddr32bit=0, + TxComplProducerWrEn=0x40, + TxComplIntrStatus=0x20, + CommonQueueMode=0x10, + TxComplThreshShift=0, +}; + +/* Bits in the GenCtrl register */ +enum gen_ctrl_bits { + RxEnable=0x05, TxEnable=0x0a, + RxGFPEnable=0x10, TxGFPEnable=0x20, +}; + +/* Bits in the IntrTimerCtrl register */ +enum intr_ctrl_bits { + Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100, + SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600, + IntrLatencyMask=0x1f, +}; + /* The Rx and Tx buffer descriptors. */ struct starfire_rx_desc { - u32 rxaddr; /* Optionally 64 bits. */ + dma_addr_t rxaddr; }; enum rx_desc_bits { RxDescValid=1, RxDescEndRing=2, }; -/* Completion queue entry. - You must update the page allocation, init_ring and the shift count in rx() - if using a larger format. */ -#ifdef HAS_FIRMWARE -#define csum_rx_status -#endif /* HAS_FIRMWARE */ -struct rx_done_desc { +/* Completion queue entry. */ +struct short_rx_done_desc { + u32 status; /* Low 16 bits is length. */ +}; +struct basic_rx_done_desc { u32 status; /* Low 16 bits is length. 
*/ -#ifdef csum_rx_status - u32 status2; /* Low 16 bits is csum */ -#endif /* csum_rx_status */ -#ifdef full_rx_status - u32 status2; + u16 vlanid; + u16 status2; +}; +struct csum_rx_done_desc { + u32 status; /* Low 16 bits is length. */ + u16 csum; /* Partial checksum */ + u16 status2; +}; +struct full_rx_done_desc { + u32 status; /* Low 16 bits is length. */ + u16 status3; + u16 status2; u16 vlanid; u16 csum; /* partial checksum */ u32 timestamp; -#endif /* full_rx_status */ }; +/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */ +#ifdef HAS_FIRMWARE +#ifdef VLAN_SUPPORT +typedef struct full_rx_done_desc rx_done_desc; +#define RxComplType RxComplType3 +#else /* not VLAN_SUPPORT */ +typedef struct csum_rx_done_desc rx_done_desc; +#define RxComplType RxComplType2 +#endif /* not VLAN_SUPPORT */ +#else /* not HAS_FIRMWARE */ +#ifdef VLAN_SUPPORT +typedef struct basic_rx_done_desc rx_done_desc; +#define RxComplType RxComplType1 +#else /* not VLAN_SUPPORT */ +typedef struct short_rx_done_desc rx_done_desc; +#define RxComplType RxComplType0 +#endif /* not VLAN_SUPPORT */ +#endif /* not HAS_FIRMWARE */ + enum rx_done_bits { RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000, }; -#ifdef ZEROCOPY -/* Type 0 Tx descriptor. */ -/* If more fragments are needed, don't forget to change the - descriptor spacing as well! */ -struct starfire_tx_desc { - u32 status; - u32 nbufs; - u32 first_addr; - u16 first_len; - u16 total_len; - struct { - u32 addr; - u32 len; - } frag[MAX_STARFIRE_FRAGS]; -}; -#else /* not ZEROCOPY */ /* Type 1 Tx descriptor. */ -struct starfire_tx_desc { +struct starfire_tx_desc_1 { + u32 status; /* Upper bits are status, lower 16 length. */ + u32 addr; +}; + +/* Type 2 Tx descriptor. */ +struct starfire_tx_desc_2 { u32 status; /* Upper bits are status, lower 16 length. */ - u32 first_addr; + u32 reserved; + u64 addr; }; -#endif /* not ZEROCOPY */ + +#ifdef ADDR_64BITS +typedef struct starfire_tx_desc_2 starfire_tx_desc; +#define TX_DESC_TYPE TxDescType2 +#else /* not ADDR_64BITS */ +typedef struct starfire_tx_desc_1 starfire_tx_desc; +#define TX_DESC_TYPE TxDescType1 +#endif /* not ADDR_64BITS */ +#define TX_DESC_SPACING TxDescSpaceUnlim + enum tx_desc_bits { TxDescID=0xB0000000, TxCRCEn=0x01000000, TxDescIntr=0x08000000, TxRingWrap=0x04000000, TxCalTCP=0x02000000, }; -struct tx_done_report { +struct tx_done_desc { u32 status; /* timestamp, index. */ #if 0 u32 intrstatus; /* interrupt status */ @@ -542,41 +685,45 @@ }; struct tx_ring_info { struct sk_buff *skb; - dma_addr_t first_mapping; -#ifdef ZEROCOPY - dma_addr_t frag_mapping[MAX_STARFIRE_FRAGS]; -#endif /* ZEROCOPY */ + dma_addr_t mapping; + unsigned int used_slots; }; #define PHY_CNT 2 struct netdev_private { /* Descriptor rings first for alignment. */ struct starfire_rx_desc *rx_ring; - struct starfire_tx_desc *tx_ring; + starfire_tx_desc *tx_ring; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; /* The addresses of rx/tx-in-place skbuffs. */ struct rx_ring_info rx_info[RX_RING_SIZE]; struct tx_ring_info tx_info[TX_RING_SIZE]; /* Pointers to completion queues (full pages). 
*/ - struct rx_done_desc *rx_done_q; + rx_done_desc *rx_done_q; dma_addr_t rx_done_q_dma; unsigned int rx_done; - struct tx_done_report *tx_done_q; + struct tx_done_desc *tx_done_q; dma_addr_t tx_done_q_dma; unsigned int tx_done; struct net_device_stats stats; struct pci_dev *pci_dev; +#ifdef VLAN_SUPPORT + struct vlan_group *vlgrp; +#endif + void *queue_mem; + dma_addr_t queue_mem_dma; + size_t queue_mem_size; + /* Frequently used values: keep some adjacent for cache effect. */ spinlock_t lock; unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ - unsigned int cur_tx, dirty_tx; + unsigned int cur_tx, dirty_tx, reap_tx; unsigned int rx_buf_sz; /* Based on MTU+slack. */ - unsigned int tx_full:1, /* The Tx queue is full. */ /* These values keep track of the transceiver/media in use. */ - speed100:1; /* Set if speed == 100MBit. */ - unsigned int intr_mitigation; + int speed100; /* Set if speed == 100MBit. */ u32 tx_mode; + u32 intr_timer_ctrl; u8 tx_threshold; /* MII transceiver section. */ struct mii_if_info mii_if; /* MII lib hooks/info */ @@ -603,6 +750,44 @@ static void netdev_media_change(struct net_device *dev); +#ifdef VLAN_SUPPORT +static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct netdev_private *np = dev->priv; + + spin_lock(&np->lock); + if (debug > 2) + printk("%s: Setting vlgrp to %p\n", dev->name, grp); + np->vlgrp = grp; + set_rx_mode(dev); + spin_unlock(&np->lock); +} + +static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct netdev_private *np = dev->priv; + + spin_lock(&np->lock); + if (debug > 1) + printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid); + set_rx_mode(dev); + spin_unlock(&np->lock); +} + +static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct netdev_private *np = dev->priv; + + spin_lock(&np->lock); + if (debug > 1) + printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); + if (np->vlgrp) + np->vlgrp->vlan_devices[vid] = NULL; + set_rx_mode(dev); + spin_unlock(&np->lock); +} +#endif /* VLAN_SUPPORT */ + static int __devinit starfire_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -614,10 +799,6 @@ long ioaddr; int drv_flags, io_size; int boguscnt; -#ifndef HAVE_PCI_SET_MWI - u16 cmd; - u8 cache; -#endif /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -634,13 +815,13 @@ ioaddr = pci_resource_start(pdev, 0); io_size = pci_resource_len(pdev, 0); if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { - printk (KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx); + printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx); return -ENODEV; } dev = alloc_etherdev(sizeof(*np)); if (!dev) { - printk (KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx); + printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx); return -ENOMEM; } SET_MODULE_OWNER(dev); @@ -648,7 +829,7 @@ irq = pdev->irq; if (pci_request_regions (pdev, dev->name)) { - printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx); + printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx); goto err_out_free_netdev; } @@ -656,7 +837,7 @@ #if !defined(CONFIG_SPARC64) || LINUX_VERSION_CODE > 0x20300 ioaddr = (long) ioremap(ioaddr, io_size); if (!ioaddr) { - printk (KERN_ERR DRV_NAME " %d: cannot remap 0x%x @ 0x%lx, aborting\n", + printk(KERN_ERR 
DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", card_idx, io_size, ioaddr); goto err_out_free_res; } @@ -664,29 +845,26 @@ pci_set_master(pdev); -#ifdef HAVE_PCI_SET_MWI - pci_set_mwi(pdev); -#else /* enable MWI -- it vastly improves Rx performance on sparc64 */ - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - cmd |= PCI_COMMAND_INVALIDATE; - pci_write_config_word(pdev, PCI_COMMAND, cmd); - - /* set PCI cache size */ - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache); - if ((cache << 2) != SMP_CACHE_BYTES) { - printk(KERN_INFO " PCI cache line size set incorrectly " - "(%i bytes) by BIOS/FW, correcting to %i\n", - (cache << 2), SMP_CACHE_BYTES); - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, - SMP_CACHE_BYTES >> 2); - } -#endif + pci_set_mwi(pdev); +#ifdef MAX_SKB_FRAGS + dev->features |= NETIF_F_SG; +#endif /* MAX_SKB_FRAGS */ #ifdef ZEROCOPY - /* Starfire can do SG and TCP/UDP checksumming */ - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + /* Starfire can do TCP/UDP checksumming */ + if (enable_hw_cksum) + dev->features |= NETIF_F_IP_CSUM; #endif /* ZEROCOPY */ +#ifdef VLAN_SUPPORT + dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; + dev->vlan_rx_register = netdev_vlan_rx_register; + dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid; +#endif /* VLAN_RX_KILL_VID */ +#ifdef ADDR_64BITS + dev->features |= NETIF_F_HIGHDMA; +#endif /* ADDR_64BITS */ /* Serial EEPROM reads are hidden by the hardware. */ for (i = 0; i < 6; i++) @@ -701,7 +879,7 @@ #endif /* Issue soft reset */ - writel(0x8000, ioaddr + TxMode); + writel(MiiSoftReset, ioaddr + TxMode); udelay(1000); writel(0, ioaddr + TxMode); @@ -747,11 +925,35 @@ np->mii_if.full_duplex = 1; if (np->mii_if.full_duplex) - np->mii_if.force_media = 0; - else np->mii_if.force_media = 1; + else + np->mii_if.force_media = 0; np->speed100 = 1; + /* timer resolution is 128 * 0.8us */ + np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) | + Timer10X | EnableIntrMasking; + + if (small_frames > 0) { + np->intr_timer_ctrl |= SmallFrameBypass; + switch (small_frames) { + case 1 ... 64: + np->intr_timer_ctrl |= SmallFrame64; + break; + case 65 ... 128: + np->intr_timer_ctrl |= SmallFrame128; + break; + case 129 ... 256: + np->intr_timer_ctrl |= SmallFrame256; + break; + default: + np->intr_timer_ctrl |= SmallFrame512; + if (small_frames > 512) + printk("Adjusting small_frames down to 512\n"); + break; + } + } + /* The chip-specific entries in the device structure. 
*/ dev->open = &netdev_open; dev->hard_start_xmit = &start_tx; @@ -764,11 +966,10 @@ if (mtu) dev->mtu = mtu; - i = register_netdev(dev); - if (i) + if (register_netdev(dev)) goto err_out_cleardev; - printk(KERN_INFO "%s: %s at 0x%lx, ", + printk(KERN_INFO "%s: %s at %#lx, ", dev->name, netdrv_tbl[chip_idx].name, ioaddr); for (i = 0; i < 5; i++) printk("%2.2x:", dev->dev_addr[i]); @@ -793,7 +994,7 @@ np->phys[phy_idx++] = phy; np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); printk(KERN_INFO "%s: MII PHY found at address %d, status " - "0x%4.4x advertising %4.4x.\n", + "%#4.4x advertising %#4.4x.\n", dev->name, phy, mii_status, np->mii_if.advertising); /* there can be only one PHY on-board */ break; @@ -806,14 +1007,8 @@ memset(&np->mii_if, 0, sizeof(np->mii_if)); } -#ifdef ZEROCOPY - printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming enabled.\n", - dev->name); -#else /* not ZEROCOPY */ - printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming disabled.\n", - dev->name); -#endif /* not ZEROCOPY */ - + printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n", + dev->name, enable_hw_cksum ? "enabled" : "disabled"); return 0; err_out_cleardev: @@ -822,7 +1017,6 @@ err_out_free_res: pci_release_regions (pdev); err_out_free_netdev: - unregister_netdev(dev); kfree(dev); return -ENODEV; } @@ -858,6 +1052,7 @@ struct netdev_private *np = dev->priv; long ioaddr = dev->base_addr; int i, retval; + size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; /* Do we ever need to reset the chip??? */ @@ -875,62 +1070,61 @@ if (debug > 1) printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", dev->name, dev->irq); - /* Allocate the various queues, failing gracefully. */ - if (np->tx_done_q == 0) - np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma); - if (np->rx_done_q == 0) - np->rx_done_q = pci_alloc_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE, &np->rx_done_q_dma); - if (np->tx_ring == 0) - np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma); - if (np->rx_ring == 0) - np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma); - if (np->tx_done_q == 0 || np->rx_done_q == 0 - || np->rx_ring == 0 || np->tx_ring == 0) { - if (np->tx_done_q) - pci_free_consistent(np->pci_dev, PAGE_SIZE, - np->tx_done_q, np->tx_done_q_dma); - if (np->rx_done_q) - pci_free_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE, - np->rx_done_q, np->rx_done_q_dma); - if (np->tx_ring) - pci_free_consistent(np->pci_dev, PAGE_SIZE, - np->tx_ring, np->tx_ring_dma); - if (np->rx_ring) - pci_free_consistent(np->pci_dev, PAGE_SIZE, - np->rx_ring, np->rx_ring_dma); - COMPAT_MOD_DEC_USE_COUNT; - return -ENOMEM; + + /* Allocate the various queues. 
*/ + if (np->queue_mem == 0) { + tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; + rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; + tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; + rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE; + np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; + np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); + if (np->queue_mem == 0) { + COMPAT_MOD_DEC_USE_COUNT; + return -ENOMEM; + } + + np->tx_done_q = np->queue_mem; + np->tx_done_q_dma = np->queue_mem_dma; + np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size; + np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size; + np->tx_ring = (void *) np->rx_done_q + rx_done_q_size; + np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size; + np->rx_ring = (void *) np->tx_ring + tx_ring_size; + np->rx_ring_dma = np->tx_ring_dma + tx_ring_size; } + /* Start with no carrier, it gets adjusted later */ netif_carrier_off(dev); init_ring(dev); /* Set the size of the Rx buffers. */ writel((np->rx_buf_sz << RxBufferLenShift) | (0 << RxMinDescrThreshShift) | RxPrefetchMode | RxVariableQ | + RX_Q_ENTRIES | + RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE | RxDescSpace4, ioaddr + RxDescQCtrl); -#ifdef ZEROCOPY - /* Set Tx descriptor to type 0 and spacing to 64 bytes. */ - writel((2 << TxHiPriFIFOThreshShift) | - (0 << TxPadLenShift) | - (4 << TxDMABurstSizeShift) | - TxDescSpace64 | TxDescType0, - ioaddr + TxDescCtrl); -#else /* not ZEROCOPY */ - /* Set Tx descriptor to type 1 and padding to 0 bytes. */ + /* Set up the Rx DMA controller. */ + writel(RxChecksumIgnore | + (0 << RxEarlyIntThreshShift) | + (6 << RxHighPrioThreshShift) | + ((DMA_BURST_SIZE / 32) << RxBurstSizeShift), + ioaddr + RxDMACtrl); + + /* Set Tx descriptor */ writel((2 << TxHiPriFIFOThreshShift) | (0 << TxPadLenShift) | - (4 << TxDMABurstSizeShift) | - TxDescSpaceUnlim | TxDescType1, + ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) | + TX_DESC_Q_ADDR_SIZE | + TX_DESC_SPACING | TX_DESC_TYPE, ioaddr + TxDescCtrl); -#endif /* not ZEROCOPY */ -#if defined(ADDR_64BITS) && defined(__alpha__) - /* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */ - writel(np->rx_ring_dma >> 32, ioaddr + RxDescQHiAddr); - writel(np->tx_ring_dma >> 32, ioaddr + TxRingHiAddr); +#if defined(ADDR_64BITS) + writel(np->queue_mem_dma >> 32, ioaddr + RxDescQHiAddr); + writel(np->queue_mem_dma >> 32, ioaddr + TxRingHiAddr); + writel(np->queue_mem_dma >> 32, ioaddr + CompletionHiAddr); #else writel(0, ioaddr + RxDescQHiAddr); writel(0, ioaddr + TxRingHiAddr); @@ -940,32 +1134,23 @@ writel(np->tx_ring_dma, ioaddr + TxRingPtr); writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr); -#ifdef full_rx_status writel(np->rx_done_q_dma | - RxComplType3 | + RxComplType | (0 << RxComplThreshShift), ioaddr + RxCompletionAddr); -#else /* not full_rx_status */ -#ifdef csum_rx_status - writel(np->rx_done_q_dma | - RxComplType2 | - (0 << RxComplThreshShift), - ioaddr + RxCompletionAddr); -#else /* not csum_rx_status */ - writel(np->rx_done_q_dma | - RxComplType0 | - (0 << RxComplThreshShift), - ioaddr + RxCompletionAddr); -#endif /* not csum_rx_status */ -#endif /* not full_rx_status */ if (debug > 1) printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name); - /* Fill both the unused Tx SA register and the Rx perfect filter. 
*/ + /* Fill both the Tx SA register and the Rx perfect filter. */ for (i = 0; i < 6; i++) - writeb(dev->dev_addr[i], ioaddr + StationAddr + 5 - i); - for (i = 0; i < 16; i++) { + writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i); + /* The first entry is special because it bypasses the VLAN filter. + Don't use it. */ + writew(0, ioaddr + PerfFilterTable); + writew(0, ioaddr + PerfFilterTable + 4); + writew(0, ioaddr + PerfFilterTable + 8); + for (i = 1; i < 16; i++) { u16 *eaddrs = (u16 *)dev->dev_addr; long setup_frm = ioaddr + PerfFilterTable + i * 16; writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4; @@ -975,16 +1160,14 @@ /* Initialize other registers. */ /* Configure the PCI bus bursts and FIFO thresholds. */ - np->tx_mode = 0x0C04; /* modified when link is up. */ - writel(0x8000 | np->tx_mode, ioaddr + TxMode); + np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */ + writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode); udelay(1000); writel(np->tx_mode, ioaddr + TxMode); np->tx_threshold = 4; writel(np->tx_threshold, ioaddr + TxThreshold); - interrupt_mitigation &= 0x1f; - np->intr_mitigation = interrupt_mitigation; - writel(np->intr_mitigation, ioaddr + IntrTimerCtrl); + writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); netif_start_if(dev); netif_start_queue(dev); @@ -999,29 +1182,35 @@ /* Enable GPIO interrupts on link change */ writel(0x0f00ff00, ioaddr + GPIOCtrl); - /* Set the interrupt mask and enable PCI interrupts. */ + /* Set the interrupt mask */ writel(IntrRxDone | IntrRxEmpty | IntrDMAErr | - IntrTxDone | IntrStatsMax | IntrLinkChange | - IntrNormalSummary | IntrAbnormalSummary | + IntrTxDMADone | IntrStatsMax | IntrLinkChange | IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID, ioaddr + IntrEnable); + /* Enable PCI interrupts. */ writel(0x00800000 | readl(ioaddr + PCIDeviceConfig), ioaddr + PCIDeviceConfig); +#ifdef VLAN_SUPPORT + /* Set VLAN type to 802.1q */ + writel(ETH_P_8021Q, ioaddr + VlanType); +#endif /* VLAN_SUPPORT */ + #ifdef HAS_FIRMWARE /* Load Rx/Tx firmware into the frame processors */ for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++) writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4); for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++) writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4); - /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */ - writel(0x003F, ioaddr + GenCtrl); -#else /* not HAS_FIRMWARE */ - /* Enable the Rx and Tx units only. */ - writel(0x000F, ioaddr + GenCtrl); -#endif /* not HAS_FIRMWARE */ +#endif /* HAS_FIRMWARE */ + if (enable_hw_cksum) + /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */ + writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl); + else + /* Enable the Rx and Tx units only. 
*/ + writel(TxEnable|RxEnable, ioaddr + GenCtrl); - if (debug > 2) + if (debug > 1) printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); @@ -1033,11 +1222,17 @@ { struct netdev_private *np = dev->priv; u16 reg0; + int silly_count = 1000; mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising); mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET); udelay(500); - while (mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET); + while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET) + /* do nothing */; + if (!silly_count) { + printk("%s: MII reset failed!\n", dev->name); + return; + } reg0 = mdio_read(dev, np->phys[0], MII_BMCR); @@ -1062,25 +1257,22 @@ { struct netdev_private *np = dev->priv; long ioaddr = dev->base_addr; + int old_debug; - printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," - " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus)); - -#ifndef __alpha__ - { - int i; - printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); - for (i = 0; i < RX_RING_SIZE; i++) - printk(" %8.8x", (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr)); - printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring); - for (i = 0; i < TX_RING_SIZE; i++) - printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status)); - printk("\n"); - } -#endif + printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, " + "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus)); /* Perhaps we should reinitialize the hardware here. */ - /* Stop and restart the chip's Tx processes . */ + + /* + * Stop and restart the interface. + * Cheat and increase the debug level temporarily. + */ + old_debug = debug; + debug = 2; + netdev_close(dev); + netdev_open(dev); + debug = old_debug; /* Trigger an immediate transmit demand. */ @@ -1096,9 +1288,8 @@ struct netdev_private *np = dev->priv; int i; - np->tx_full = 0; - np->cur_rx = np->cur_tx = 0; - np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0; + np->cur_rx = np->cur_tx = np->reap_tx = 0; + np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0; np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); @@ -1111,7 +1302,7 @@ np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb->dev = dev; /* Mark as being used by this device. */ /* Grrr, we cannot offset to correctly align the IP header. */ - np->rx_ring[i].rxaddr = cpu_to_le32(np->rx_info[i].mapping | RxDescValid); + np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); } writew(i - 1, dev->base_addr + RxDescQIdx); np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); @@ -1123,7 +1314,7 @@ np->rx_info[i].mapping = 0; } /* Mark the last entry as wrapping the ring. */ - np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing); + np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing); /* Clear the completion rings. 
*/ for (i = 0; i < DONE_Q_SIZE; i++) { @@ -1131,18 +1322,9 @@ np->tx_done_q[i].status = 0; } - for (i = 0; i < TX_RING_SIZE; i++) { - np->tx_info[i].skb = NULL; - np->tx_info[i].first_mapping = 0; -#ifdef ZEROCOPY - { - int j; - for (j = 0; j < MAX_STARFIRE_FRAGS; j++) - np->tx_info[i].frag_mapping[j] = 0; - } -#endif /* ZEROCOPY */ - np->tx_ring[i].status = 0; - } + for (i = 0; i < TX_RING_SIZE; i++) + memset(&np->tx_info[i], 0, sizeof(np->tx_info[i])); + return; } @@ -1151,19 +1333,21 @@ { struct netdev_private *np = dev->priv; unsigned int entry; -#ifdef ZEROCOPY + u32 status; int i; -#endif kick_tx_timer(dev, tx_timeout, TX_TIMEOUT); - /* Caution: the write order is important here, set the field - with the "ownership" bits last. */ - - /* Calculate the next Tx descriptor entry. */ - entry = np->cur_tx % TX_RING_SIZE; + /* + * be cautious here, wrapping the queue has weird semantics + * and we may not have enough slots even when it seems we do. + */ + if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) { + netif_stop_queue(dev); + return 1; + } -#if defined(ZEROCOPY) && defined(HAS_FIRMWARE) && defined(HAS_BROKEN_FIRMWARE) +#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) { int has_bad_length = 0; @@ -1180,85 +1364,72 @@ if (has_bad_length) skb_checksum_help(skb); } -#endif /* ZEROCOPY && HAS_FIRMWARE && HAS_BROKEN_FIRMWARE */ +#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ - np->tx_info[entry].skb = skb; - np->tx_info[entry].first_mapping = - pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE); - - np->tx_ring[entry].first_addr = cpu_to_le32(np->tx_info[entry].first_mapping); -#ifdef ZEROCOPY - np->tx_ring[entry].first_len = cpu_to_le16(skb_first_frag_len(skb)); - np->tx_ring[entry].total_len = cpu_to_le16(skb->len); - /* Add "| TxDescIntr" to generate Tx-done interrupts. */ - np->tx_ring[entry].status = cpu_to_le32(TxDescID | TxCRCEn); - np->tx_ring[entry].nbufs = cpu_to_le32(skb_shinfo(skb)->nr_frags + 1); -#else /* not ZEROCOPY */ - /* Add "| TxDescIntr" to generate Tx-done interrupts. 
*/ - np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID | TxCRCEn | 1 << 16); -#endif /* not ZEROCOPY */ - - if (entry >= TX_RING_SIZE-1) /* Wrap ring */ - np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr); - -#ifdef ZEROCOPY - if (skb->ip_summed == CHECKSUM_HW) { - np->tx_ring[entry].status |= cpu_to_le32(TxCalTCP); - np->stats.tx_compressed++; - } -#endif /* ZEROCOPY */ - - if (debug > 5) { -#ifdef ZEROCOPY - printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x nbufs %d len %4.4x/%4.4x.\n", - dev->name, np->cur_tx, entry, - le32_to_cpu(np->tx_ring[entry].status), - le32_to_cpu(np->tx_ring[entry].nbufs), - le32_to_cpu(np->tx_ring[entry].first_len), - le32_to_cpu(np->tx_ring[entry].total_len)); -#else /* not ZEROCOPY */ - printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x.\n", - dev->name, np->cur_tx, entry, - le32_to_cpu(np->tx_ring[entry].status)); -#endif /* not ZEROCOPY */ - } - -#ifdef ZEROCOPY - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i]; + entry = np->cur_tx % TX_RING_SIZE; + for (i = 0; i < skb_num_frags(skb); i++) { + int wrap_ring = 0; + status = TxDescID; + + if (i == 0) { + np->tx_info[entry].skb = skb; + status |= TxCRCEn; + if (entry >= TX_RING_SIZE - skb_num_frags(skb)) { + status |= TxRingWrap; + wrap_ring = 1; + } + if (np->reap_tx) { + status |= TxDescIntr; + np->reap_tx = 0; + } + if (skb->ip_summed == CHECKSUM_HW) { + status |= TxCalTCP; + np->stats.tx_compressed++; + } + status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16); - /* we already have the proper value in entry */ - np->tx_info[entry].frag_mapping[i] = - pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE); - - np->tx_ring[entry].frag[i].addr = cpu_to_le32(np->tx_info[entry].frag_mapping[i]); - np->tx_ring[entry].frag[i].len = cpu_to_le32(this_frag->size); - if (debug > 5) { - printk(KERN_DEBUG "%s: Tx #%d frag %d len %4.4x.\n", - dev->name, np->cur_tx, i, - le32_to_cpu(np->tx_ring[entry].frag[i].len)); - } + np->tx_info[entry].mapping = + pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE); + } else { +#ifdef MAX_SKB_FRAGS + skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1]; + status |= this_frag->size; + np->tx_info[entry].mapping = + pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE); +#endif /* MAX_SKB_FRAGS */ + } + + np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); + np->tx_ring[entry].status = cpu_to_le32(status); + if (debug > 3) + printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n", + dev->name, np->cur_tx, np->dirty_tx, + entry, status); + if (wrap_ring) { + np->tx_info[entry].used_slots = TX_RING_SIZE - entry; + np->cur_tx += np->tx_info[entry].used_slots; + entry = 0; + } else { + np->tx_info[entry].used_slots = 1; + np->cur_tx += np->tx_info[entry].used_slots; + entry++; + } + /* scavenge the tx descriptors twice per TX_RING_SIZE */ + if (np->cur_tx % (TX_RING_SIZE / 2) == 0) + np->reap_tx = 1; } -#endif /* ZEROCOPY */ - - np->cur_tx++; - - if (entry >= TX_RING_SIZE-1) /* Wrap ring */ - entry = -1; - entry++; /* Non-x86: explicitly flush descriptor cache lines here. */ - /* Ensure everything is written back above before the transmit is + /* Ensure all descriptors are written back before the transmit is initiated. - Jes */ wmb(); /* Update the producer index. 
*/ - writel(entry * (sizeof(struct starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx); + writel(entry * (sizeof(starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx); - if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) { - np->tx_full = 1; + /* 4 is arbitrary, but should be ok */ + if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) netif_stop_queue(dev); - } dev->trans_start = jiffies; @@ -1270,20 +1441,13 @@ after the Tx thread. */ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) { - struct net_device *dev = (struct net_device *)dev_instance; + struct net_device *dev = dev_instance; struct netdev_private *np; long ioaddr; int boguscnt = max_interrupt_work; int consumer; int tx_status; -#ifndef final_version /* Can never occur. */ - if (dev == NULL) { - printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown device.\n", irq); - return; - } -#endif - ioaddr = dev->base_addr; np = dev->priv; @@ -1291,83 +1455,69 @@ u32 intr_status = readl(ioaddr + IntrClear); if (debug > 4) - printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n", + printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n", dev->name, intr_status); - if (intr_status == 0) + if (intr_status == 0 || intr_status == (u32) -1) break; - if (intr_status & IntrRxDone) + if (intr_status & (IntrRxDone | IntrRxEmpty)) netdev_rx(dev); /* Scavenge the skbuff list based on the Tx-done queue. There are redundant checks here that may be cleaned up after the driver has proven to be reliable. */ consumer = readl(ioaddr + TxConsumerIdx); - if (debug > 4) + if (debug > 3) printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n", dev->name, consumer); -#if 0 - if (np->tx_done >= 250 || np->tx_done == 0) - printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, %d is %8.8x.\n", - dev->name, np->tx_done, - le32_to_cpu(np->tx_done_q[np->tx_done].status), - (np->tx_done+1) & (DONE_Q_SIZE-1), - le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status)); -#endif while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) { - if (debug > 4) - printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n", - dev->name, np->tx_done, tx_status); + if (debug > 3) + printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n", + dev->name, np->dirty_tx, np->tx_done, tx_status); if ((tx_status & 0xe0000000) == 0xa0000000) { np->stats.tx_packets++; } else if ((tx_status & 0xe0000000) == 0x80000000) { - struct sk_buff *skb; -#ifdef ZEROCOPY - int i; -#endif /* ZEROCOPY */ - u16 entry = tx_status; /* Implicit truncate */ - entry /= sizeof(struct starfire_tx_desc); - - skb = np->tx_info[entry].skb; + u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc); + struct sk_buff *skb = np->tx_info[entry].skb; np->tx_info[entry].skb = NULL; pci_unmap_single(np->pci_dev, - np->tx_info[entry].first_mapping, + np->tx_info[entry].mapping, skb_first_frag_len(skb), PCI_DMA_TODEVICE); - np->tx_info[entry].first_mapping = 0; - -#ifdef ZEROCOPY - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - pci_unmap_single(np->pci_dev, - np->tx_info[entry].frag_mapping[i], - skb_shinfo(skb)->frags[i].size, - PCI_DMA_TODEVICE); - np->tx_info[entry].frag_mapping[i] = 0; + np->tx_info[entry].mapping = 0; + np->dirty_tx += np->tx_info[entry].used_slots; + entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; +#ifdef MAX_SKB_FRAGS + { + int i; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + pci_unmap_single(np->pci_dev, + np->tx_info[entry].mapping, + skb_shinfo(skb)->frags[i].size, + PCI_DMA_TODEVICE); + np->dirty_tx++; + 
entry++; + } } -#endif /* ZEROCOPY */ - - /* Scavenge the descriptor. */ +#endif /* MAX_SKB_FRAGS */ dev_kfree_skb_irq(skb); - - np->dirty_tx++; } np->tx_done_q[np->tx_done].status = 0; - np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1); + np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE; } writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2); - if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) { + if (netif_queue_stopped(dev) && + (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) { /* The ring is no longer full, wake the queue. */ - np->tx_full = 0; netif_wake_queue(dev); } /* Stats overflow */ - if (intr_status & IntrStatsMax) { + if (intr_status & IntrStatsMax) get_stats(dev); - } /* Media change interrupt. */ if (intr_status & IntrLinkChange) @@ -1378,28 +1528,17 @@ netdev_error(dev, intr_status); if (--boguscnt < 0) { - printk(KERN_WARNING "%s: Too much work at interrupt, " - "status=0x%4.4x.\n", - dev->name, intr_status); + if (debug > 1) + printk(KERN_WARNING "%s: Too much work at interrupt, " + "status=%#8.8x.\n", + dev->name, intr_status); break; } } while (1); if (debug > 4) - printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", - dev->name, (int)readl(ioaddr + IntrStatus)); - -#ifndef final_version - /* Code that should never be run! Remove after testing.. */ - { - static int stopit = 10; - if (!netif_running(dev) && --stopit < 0) { - printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n", - dev->name); - free_irq(irq, dev); - } - } -#endif + printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n", + dev->name, (int) readl(ioaddr + IntrStatus)); } @@ -1411,26 +1550,21 @@ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; u32 desc_status; - if (np->rx_done_q == 0) { - printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n", - dev->name, np->rx_done, np->tx_done_q); - return 0; - } - /* If EOP is set on the next entry, it's a new packet. Send it up. */ while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) { struct sk_buff *skb; u16 pkt_len; int entry; + rx_done_desc *desc = &np->rx_done_q[np->rx_done]; if (debug > 4) - printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n", np->rx_done, desc_status); + printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status); if (--boguscnt < 0) break; - if ( ! (desc_status & RxOK)) { + if (!(desc_status & RxOK)) { /* There was a error. */ if (debug > 2) - printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", desc_status); + printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status); np->stats.rx_errors++; if (desc_status & RxFIFOErr) np->stats.rx_fifo_errors++; @@ -1440,10 +1574,8 @@ pkt_len = desc_status; /* Implicitly Truncate */ entry = (desc_status >> 16) & 0x7ff; -#ifndef final_version if (debug > 4) printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, bogus_cnt %d.\n", pkt_len, boguscnt); -#endif /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. */ if (pkt_len < rx_copybreak @@ -1453,12 +1585,8 @@ pci_dma_sync_single(np->pci_dev, np->rx_info[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); -#if HAS_IP_COPYSUM /* Call copy + cksum if available. 
*/ eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0); skb_put(skb, pkt_len); -#else - memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail, pkt_len); -#endif } else { pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb = np->rx_info[entry].skb; @@ -1470,44 +1598,54 @@ /* You will want this info for the initial debug. */ if (debug > 5) printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x " - "%d.%d.%d.%d.\n", + "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n", skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5], skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], - skb->data[11], skb->data[12], skb->data[13], - skb->data[14], skb->data[15], skb->data[16], - skb->data[17]); + skb->data[11], skb->data[12], skb->data[13]); #endif + skb->protocol = eth_type_trans(skb, dev); -#if defined(full_rx_status) || defined(csum_rx_status) - if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000) { +#if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT) + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2)); +#endif +#ifdef HAS_FIRMWARE + if (le16_to_cpu(desc->status2) & 0x0100) { skb->ip_summed = CHECKSUM_UNNECESSARY; np->stats.rx_compressed++; } /* * This feature doesn't seem to be working, at least * with the two firmware versions I have. If the GFP sees - * a fragment, it either ignores it completely, or reports + * an IP fragment, it either ignores it completely, or reports * "bad checksum" on it. * * Maybe I missed something -- corrections are welcome. * Until then, the printk stays. :-) -Ion */ - else if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x00400000) { + else if (le16_to_cpu(desc->status2) & 0x0040) { skb->ip_summed = CHECKSUM_HW; - skb->csum = le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0xffff; - printk(KERN_DEBUG "%s: checksum_hw, status2 = %x\n", dev->name, np->rx_done_q[np->rx_done].status2); + skb->csum = le16_to_cpu(desc->csum); + printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2)); } -#endif - netif_rx(skb); +#endif /* HAS_FIRMWARE */ +#ifdef VLAN_SUPPORT + if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) { + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid)); + /* vlan_hwaccel_rx expects a packet with the VLAN tag stripped out */ + vlan_hwaccel_rx(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK); + } else +#endif /* VLAN_SUPPORT */ + netif_rx(skb); dev->last_rx = jiffies; np->stats.rx_packets++; -next_rx: + next_rx: np->cur_rx++; - np->rx_done_q[np->rx_done].status = 0; - np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1); + desc->status = 0; + np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE; } writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx); @@ -1524,16 +1662,16 @@ pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb->dev = dev; /* Mark as being used by this device. */ np->rx_ring[entry].rxaddr = - cpu_to_le32(np->rx_info[entry].mapping | RxDescValid); + cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); } if (entry == RX_RING_SIZE - 1) - np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing); + np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing); /* We could defer this until later... 
*/ writew(entry, dev->base_addr + RxDescQIdx); } if (debug > 5) - printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x.\n", + printk(KERN_DEBUG " exiting netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status); /* Restart Rx engine if stopped. */ @@ -1547,6 +1685,7 @@ long ioaddr = dev->base_addr; u16 reg0, reg1, reg4, reg5; u32 new_tx_mode; + u32 new_intr_timer_ctrl; /* reset status first */ mdio_read(dev, np->phys[0], MII_BMCR); @@ -1591,15 +1730,23 @@ np->speed100 ? "100" : "10", np->mii_if.full_duplex ? "full" : "half"); - new_tx_mode = np->tx_mode & ~0x2; /* duplex setting */ + new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */ if (np->mii_if.full_duplex) - new_tx_mode |= 2; + new_tx_mode |= FullDuplex; if (np->tx_mode != new_tx_mode) { np->tx_mode = new_tx_mode; - writel(np->tx_mode | 0x8000, ioaddr + TxMode); + writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode); udelay(1000); writel(np->tx_mode, ioaddr + TxMode); } + + new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X; + if (np->speed100) + new_intr_timer_ctrl |= Timer10X; + if (np->intr_timer_ctrl != new_intr_timer_ctrl) { + np->intr_timer_ctrl = new_intr_timer_ctrl; + writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl); + } } else { netif_carrier_off(dev); printk(KERN_DEBUG "%s: Link is down\n", dev->name); @@ -1613,9 +1760,12 @@ /* Came close to underrunning the Tx FIFO, increase threshold. */ if (intr_status & IntrTxDataLow) { - writel(++np->tx_threshold, dev->base_addr + TxThreshold); - printk(KERN_NOTICE "%s: Increasing Tx FIFO threshold to %d bytes\n", - dev->name, np->tx_threshold * 16); + if (np->tx_threshold <= PKT_BUF_SZ / 16) { + writel(++np->tx_threshold, dev->base_addr + TxThreshold); + printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n", + dev->name, np->tx_threshold * 16); + } else + printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name); } if (intr_status & IntrRxGFPDead) { np->stats.rx_fifo_errors++; @@ -1626,7 +1776,7 @@ np->stats.tx_errors++; } if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug) - printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", + printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n", dev->name, intr_status); } @@ -1661,39 +1811,67 @@ /* Chips may use the upper or lower CRC bits, and may reverse and/or invert them. Select the endian-ness that results in minimal calculations. */ - static void set_rx_mode(struct net_device *dev) { long ioaddr = dev->base_addr; - u32 rx_mode; + u32 rx_mode = MinVLANPrio; struct dev_mc_list *mclist; int i; +#ifdef VLAN_SUPPORT + struct netdev_private *np = dev->priv; + + rx_mode |= VlanMode; + if (np->vlgrp) { + int vlan_count = 0; + long filter_addr = ioaddr + HashTable + 8; + for (i = 0; i < VLAN_VID_MASK; i++) { + if (np->vlgrp->vlan_devices[i]) { + if (vlan_count >= 32) + break; + writew(cpu_to_be16(i), filter_addr); + filter_addr += 16; + vlan_count++; + } + } + if (i == VLAN_VID_MASK) { + rx_mode |= PerfectFilterVlan; + while (vlan_count < 32) { + writew(0, filter_addr); + filter_addr += 16; + vlan_count++; + } + } + } +#endif /* VLAN_SUPPORT */ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ - rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys; + rx_mode |= AcceptAll; } else if ((dev->mc_count > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { /* Too many to match, or accept all multicasts. 
*/ - rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys; - } else if (dev->mc_count <= 15) { - /* Use the 16 element perfect filter, skip first entry. */ - long filter_addr = ioaddr + PerfFilterTable + 1 * 16; - for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count; + rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter; + } else if (dev->mc_count <= 14) { + /* Use the 16 element perfect filter, skip first two entries. */ + long filter_addr = ioaddr + PerfFilterTable + 2 * 16; + u16 *eaddrs; + for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2; i++, mclist = mclist->next) { - u16 *eaddrs = (u16 *)mclist->dmi_addr; + eaddrs = (u16 *)mclist->dmi_addr; writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4; writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8; } + eaddrs = (u16 *)dev->dev_addr; while (i++ < 16) { - writew(0xffff, filter_addr); filter_addr += 4; - writew(0xffff, filter_addr); filter_addr += 4; - writew(0xffff, filter_addr); filter_addr += 8; + writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4; + writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; + writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8; } - rx_mode = AcceptBroadcast | AcceptMyPhys; + rx_mode |= AcceptBroadcast|PerfectFilter; } else { /* Must use a multicast hash table. */ long filter_addr; + u16 *eaddrs; u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); @@ -1704,16 +1882,17 @@ *fptr |= cpu_to_le32(1 << (bit_nr & 31)); } - /* Clear the perfect filter list, skip first entry. */ - filter_addr = ioaddr + PerfFilterTable + 1 * 16; - for (i = 1; i < 16; i++) { - writew(0xffff, filter_addr); filter_addr += 4; - writew(0xffff, filter_addr); filter_addr += 4; - writew(0xffff, filter_addr); filter_addr += 8; + /* Clear the perfect filter list, skip first two entries. 
*/ + filter_addr = ioaddr + PerfFilterTable + 2 * 16; + eaddrs = (u16 *)dev->dev_addr; + for (i = 2; i < 16; i++) { + writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4; + writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; + writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8; } - for (filter_addr = ioaddr + HashTable, i=0; i < 32; filter_addr+= 16, i++) + for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++) writew(mc_filter[i], filter_addr); - rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter; } writel(rx_mode, ioaddr + RxFilterMode); } @@ -1760,6 +1939,7 @@ spin_lock_irq(&np->lock); r = mii_ethtool_sset(&np->mii_if, &ecmd); spin_unlock_irq(&np->lock); + check_duplex(dev); return r; } /* restart autonegotiation */ @@ -1813,7 +1993,7 @@ spin_lock_irq(&np->lock); rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); spin_unlock_irq(&np->lock); - + if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0])) check_duplex(dev); } @@ -1831,41 +2011,42 @@ netif_stop_if(dev); if (debug > 1) { - printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n", - dev->name, (int)readl(ioaddr + IntrStatus)); - printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", - dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); + printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", + dev->name, (int) readl(ioaddr + IntrStatus)); + printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", + dev->name, np->cur_tx, np->dirty_tx, + np->cur_rx, np->dirty_rx); } /* Disable interrupts by clearing the interrupt mask. */ writel(0, ioaddr + IntrEnable); /* Stop the chip's Tx and Rx processes. */ + writel(0, ioaddr + GenCtrl); + readl(ioaddr + GenCtrl); -#ifdef __i386__ - if (debug > 2) { - printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", - np->tx_ring_dma); + if (debug > 5) { + printk(KERN_DEBUG" Tx ring at %#llx:\n", + (long long) np->tx_ring_dma); for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++) - printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n", + printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n", i, le32_to_cpu(np->tx_ring[i].status), - le32_to_cpu(np->tx_ring[i].first_addr), + (long long) dma_to_cpu(np->tx_ring[i].addr), le32_to_cpu(np->tx_done_q[i].status)); - printk(KERN_DEBUG " Rx ring at %8.8x -> %p:\n", - np->rx_ring_dma, np->rx_done_q); + printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n", + (long long) np->rx_ring_dma, np->rx_done_q); if (np->rx_done_q) for (i = 0; i < 8 /* RX_RING_SIZE */; i++) { - printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n", - i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status)); + printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n", + i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status)); } } -#endif /* __i386__ debugging only */ free_irq(dev->irq, dev); /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { - np->rx_ring[i].rxaddr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ + np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. 
*/ if (np->rx_info[i].skb != NULL) { pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(np->rx_info[i].skb); @@ -1875,28 +2056,14 @@ } for (i = 0; i < TX_RING_SIZE; i++) { struct sk_buff *skb = np->tx_info[i].skb; -#ifdef ZEROCOPY - int j; -#endif /* ZEROCOPY */ if (skb == NULL) continue; pci_unmap_single(np->pci_dev, - np->tx_info[i].first_mapping, + np->tx_info[i].mapping, skb_first_frag_len(skb), PCI_DMA_TODEVICE); - np->tx_info[i].first_mapping = 0; + np->tx_info[i].mapping = 0; dev_kfree_skb(skb); np->tx_info[i].skb = NULL; -#ifdef ZEROCOPY - for (j = 0; j < MAX_STARFIRE_FRAGS; j++) - if (np->tx_info[i].frag_mapping[j]) { - pci_unmap_single(np->pci_dev, - np->tx_info[i].frag_mapping[j], - skb_shinfo(skb)->frags[j].size, - PCI_DMA_TODEVICE); - np->tx_info[i].frag_mapping[j] = 0; - } else - break; -#endif /* ZEROCOPY */ } COMPAT_MOD_DEC_USE_COUNT; @@ -1914,19 +2081,8 @@ BUG(); np = dev->priv; - if (np->tx_done_q) - pci_free_consistent(pdev, PAGE_SIZE, - np->tx_done_q, np->tx_done_q_dma); - if (np->rx_done_q) - pci_free_consistent(pdev, - sizeof(struct rx_done_desc) * DONE_Q_SIZE, - np->rx_done_q, np->rx_done_q_dma); - if (np->tx_ring) - pci_free_consistent(pdev, PAGE_SIZE, - np->tx_ring, np->tx_ring_dma); - if (np->rx_ring) - pci_free_consistent(pdev, PAGE_SIZE, - np->rx_ring, np->rx_ring_dma); + if (np->queue_mem) + pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma); unregister_netdev(dev); iounmap((char *)dev->base_addr); @@ -1951,6 +2107,17 @@ #ifdef MODULE printk(version); #endif +#ifndef ADDR_64BITS + /* we can do this test only at run-time... sigh */ + if (sizeof(dma_addr_t) == sizeof(u64)) { + printk("This driver has not been ported to this 64-bit architecture yet\n"); + return -ENODEV; + } +#endif /* not ADDR_64BITS */ +#ifndef HAS_FIRMWARE + /* unconditionally disable hw cksums if firmware is not present */ + enable_hw_cksum = 0; +#endif /* not HAS_FIRMWARE */ return pci_module_init (&starfire_driver); } @@ -1967,8 +2134,6 @@ /* * Local variables: - * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c starfire.c" - * simple-compile-command: "gcc -DMODULE -O2 -c starfire.c" * c-basic-offset: 8 * tab-width: 8 * End: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/via-rhine.c linux.21pre4-ac2/drivers/net/via-rhine.c --- linux.21pre4/drivers/net/via-rhine.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/via-rhine.c 2003-01-12 00:33:19.000000000 +0000 @@ -1239,6 +1239,12 @@ /* Calculate the next Tx descriptor entry. */ entry = np->cur_tx % TX_RING_SIZE; + if (skb->len < ETH_ZLEN) { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } + np->tx_skbuff[entry] = skb; if ((np->drv_flags & ReqTxAlign) && diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/c101.c linux.21pre4-ac2/drivers/net/wan/c101.c --- linux.21pre4/drivers/net/wan/c101.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/c101.c 2003-01-28 16:39:15.000000000 +0000 @@ -1,12 +1,11 @@ /* * Moxa C101 synchronous serial card driver for Linux * - * Copyright (C) 2000-2002 Krzysztof Halasa + * Copyright (C) 2000-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
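
The via-rhine hunk above adds the same short-frame fix that wavelan, znet and (in a length-based variant) yellowfin receive later in this patch: frames shorter than ETH_ZLEN are explicitly padded with zeroes before being handed to the hardware, instead of letting the chip transmit whatever happens to follow the buffer. A minimal sketch of the idiom in a hypothetical 2.4 hard_start_xmit routine (example_start_xmit and the hand-off comment are illustrative only):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* skb_padto() returns the padded skb (possibly a reallocated
             * copy); on allocation failure it frees the original and
             * returns NULL, so the packet is simply dropped. */
            if (skb->len < ETH_ZLEN) {
                    skb = skb_padto(skb, ETH_ZLEN);
                    if (skb == NULL)
                            return 0;       /* skb already freed */
            }

            /* ... map skb->data and queue at least ETH_ZLEN bytes here ... */
            return 0;
    }
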
+ * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. * * For information see http://hq.pm.waw.pl/hdlc/ * @@ -31,7 +30,7 @@ #include "hd64570.h" -static const char* version = "Moxa C101 driver version: 1.10"; +static const char* version = "Moxa C101 driver version: 1.12"; static const char* devname = "C101"; #define C101_PAGE 0x1D00 @@ -78,7 +77,12 @@ #define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg)) #define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg)) #define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg)) -#define sca_outw(value, reg, card) writew(value, (card)->win0base + C101_SCA + (reg)) + +/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ +#define sca_outw(value, reg, card) do { \ + writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ + writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\ +} while(0) #define port_to_card(port) (port) #define log_node(port) (0) @@ -352,7 +356,7 @@ c101_run(irq, ram); if (*hw == '\x0') - return 0; + return first_card ? 0 : -ENOSYS; }while(*hw++ == ':'); printk(KERN_ERR "c101: invalid hardware parameters\n"); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/Config.in linux.21pre4-ac2/drivers/net/wan/Config.in --- linux.21pre4/drivers/net/wan/Config.in 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/Config.in 2003-01-28 16:39:15.000000000 +0000 @@ -68,6 +68,7 @@ tristate ' Generic HDLC layer' CONFIG_HDLC if [ "$CONFIG_HDLC" != "n" ]; then bool ' Raw HDLC support' CONFIG_HDLC_RAW + bool ' Raw HDLC Ethernet device support' CONFIG_HDLC_RAW_ETH bool ' Cisco HDLC support' CONFIG_HDLC_CISCO bool ' Frame Relay support' CONFIG_HDLC_FR bool ' Synchronous Point-to-Point Protocol (PPP) support' CONFIG_HDLC_PPP diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/dscc4.c linux.21pre4-ac2/drivers/net/wan/dscc4.c --- linux.21pre4/drivers/net/wan/dscc4.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/dscc4.c 2003-01-28 16:40:14.000000000 +0000 @@ -487,9 +487,9 @@ skb = dev_alloc_skb(len); dpriv->rx_skbuff[dirty] = skb; if (skb) { - skb->dev = dev; - skb->protocol = htons(ETH_P_HDLC); - skb->mac.raw = skb->data; + skb->dev = dev; + skb->protocol = hdlc_type_trans(skb, dev); + skb->mac.raw = skb->data; rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data, len, PCI_DMA_FROMDEVICE); } else { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/farsync.c linux.21pre4-ac2/drivers/net/wan/farsync.c --- linux.21pre4/drivers/net/wan/farsync.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/farsync.c 2003-01-28 16:39:15.000000000 +0000 @@ -764,7 +764,7 @@ /* Push upstream */ skb->mac.raw = skb->data; skb->dev = hdlc_to_dev ( &port->hdlc ); - skb->protocol = htons ( ETH_P_HDLC ); + skb->protocol = hdlc_type_trans(skb, skb->dev); netif_rx ( skb ); port_to_dev ( port )->last_rx = jiffies; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hd6457x.c linux.21pre4-ac2/drivers/net/wan/hd6457x.c --- linux.21pre4/drivers/net/wan/hd6457x.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hd6457x.c 2003-01-28 16:39:15.000000000 +0000 @@ -1,12 +1,11 @@ /* * Hitachi SCA HD64570 and HD64572 common driver for Linux * - * Copyright (C) 1998-2000 
Krzysztof Halasa + * Copyright (C) 1998-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. * * Sources of information: * Hitachi HD64570 SCA User's Manual @@ -42,7 +41,7 @@ #error Either hd64570.h or hd64572.h must be included #endif -static char sca_version[]="1.09"; +static char sca_version[]="1.12"; #define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) #define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET) @@ -294,7 +293,7 @@ skb->mac.raw = skb->data; skb->dev = hdlc_to_dev(&port->hdlc); skb->dev->last_rx = jiffies; - skb->protocol = htons(ETH_P_HDLC); + skb->protocol = hdlc_type_trans(skb, hdlc_to_dev(&port->hdlc)); netif_rx(skb); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_cisco.c linux.21pre4-ac2/drivers/net/wan/hdlc_cisco.c --- linux.21pre4/drivers/net/wan/hdlc_cisco.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_cisco.c 2003-01-28 16:39:15.000000000 +0000 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * Cisco HDLC support * - * Copyright (C) 2000 - 2001 Krzysztof Halasa + * Copyright (C) 2000 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #include @@ -85,12 +84,37 @@ skb_put(skb, sizeof(cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = hdlc_to_dev(hdlc); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } +static unsigned short cisco_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_header *data = (hdlc_header*)skb->data; + + if (skb->len < sizeof(hdlc_header)) + return __constant_htons(ETH_P_HDLC); + + if (data->address != CISCO_MULTICAST && + data->address != CISCO_UNICAST) + return __constant_htons(ETH_P_HDLC); + + switch(data->protocol) { + case __constant_htons(ETH_P_IP): + case __constant_htons(ETH_P_IPX): + case __constant_htons(ETH_P_IPV6): + skb_pull(skb, sizeof(hdlc_header)); + return data->protocol; + default: + return __constant_htons(ETH_P_HDLC); + } +} + + static void cisco_rx(struct sk_buff *skb) { hdlc_device *hdlc = dev_to_hdlc(skb->dev); @@ -109,14 +133,6 @@ skb_pull(skb, sizeof(hdlc_header)); switch(ntohs(data->protocol)) { - case ETH_P_IP: - case ETH_P_IPX: - case ETH_P_IPV6: - skb->protocol = data->protocol; - skb->dev = hdlc_to_dev(hdlc); - netif_rx(skb); - return; - case CISCO_SYS_INFO: /* Packet is not needed, drop it. 
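
For reference, the hdlc_header that cisco_type_trans() above inspects is plain Cisco-HDLC framing: a one-byte address (CISCO_UNICAST 0x0f or CISCO_MULTICAST 0x8f), a one-byte control field that is always zero, and a 16-bit protocol in network byte order carrying an EtherType-style value. Moving this check out of cisco_rx() into a type_trans hook lets the hardware driver set skb->protocol before calling netif_rx(). A hedged sketch of the layout follows; the kernel's own definition lives in include/linux/hdlc.h and may differ in naming:

    /* Sketch only -- mirrors the fields used by cisco_type_trans();
     * u8/u16 as in <linux/types.h>. */
    typedef struct {
            u8  address;    /* CISCO_UNICAST (0x0f) or CISCO_MULTICAST (0x8f) */
            u8  control;    /* always 0 */
            u16 protocol;   /* network order, e.g. htons(ETH_P_IP) */
    } __attribute__((packed)) cisco_hdlc_header;
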
*/ dev_kfree_skb_any(skb); @@ -288,6 +304,7 @@ hdlc->open = cisco_open; hdlc->stop = cisco_close; hdlc->netif_rx = cisco_rx; + hdlc->type_trans = cisco_type_trans; hdlc->proto = IF_PROTO_CISCO; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = cisco_hard_header; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_fr.c linux.21pre4-ac2/drivers/net/wan/hdlc_fr.c --- linux.21pre4/drivers/net/wan/hdlc_fr.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_fr.c 2003-01-28 16:39:15.000000000 +0000 @@ -2,13 +2,22 @@ * Generic HDLC support routines for Linux * Frame Relay support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + + Theory of PVC state in DCE mode: + + (exist,new) -> 0,0 when "PVC create" or if "link unreliable" + 0,x -> 1,1 if "link reliable" when sending FULL STATUS + 1,1 -> 1,0 if received FULL STATUS ACK + + (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" + -> 1 when "PVC up" and (exist,new) = 1,0 +*/ #include #include @@ -20,19 +29,23 @@ #include #include #include +#include #include #include #include +#include #include __inline__ pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) { - pvc_device *pvc=hdlc->state.fr.first_pvc; + pvc_device *pvc = hdlc->state.fr.first_pvc; - while (pvc) { - if (netdev_dlci(&pvc->netdev) == dlci) + while(pvc) { + if (pvc->dlci == dlci) return pvc; + if (pvc->dlci > dlci) + return NULL; /* the listed is sorted */ pvc = pvc->next; } @@ -40,18 +53,72 @@ } +__inline__ pvc_device* add_pvc(hdlc_device *hdlc, u16 dlci) +{ + pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc; + + while(*pvc_p) { + if ((*pvc_p)->dlci == dlci) + return *pvc_p; + if ((*pvc_p)->dlci > dlci) + break; /* the listed is sorted */ + pvc_p = &(*pvc_p)->next; + } + + pvc = kmalloc(sizeof(pvc_device), GFP_KERNEL); + if (!pvc) + return NULL; + + memset(pvc, 0, sizeof(pvc_device)); + pvc->dlci = dlci; + pvc->master = hdlc; + pvc->next = *pvc_p; /* Put it in the chain */ + *pvc_p = pvc; + return pvc; +} + + +__inline__ int pvc_is_used(pvc_device *pvc) +{ + return pvc->main != NULL || pvc->ether != NULL; +} + + +__inline__ void delete_unused_pvcs(hdlc_device *hdlc) +{ + pvc_device **pvc_p = &hdlc->state.fr.first_pvc; + + while(*pvc_p) { + if (!pvc_is_used(*pvc_p)) { + pvc_device *pvc = *pvc_p; + *pvc_p = pvc->next; + kfree(pvc); + continue; + } + pvc_p = &(*pvc_p)->next; + } +} + -__inline__ u16 status_to_dlci(hdlc_device *hdlc, u8 *status, - int *active, int *new) +__inline__ struct net_device** get_dev_p(pvc_device *pvc, int type) { - *new = (status[2] & 0x08); - *active = (!*new && (status[2] & 0x02)); + if (type == ARPHRD_ETHER) + return &pvc->ether; + else + return &pvc->main; +} + + +__inline__ u16 status_to_dlci(u8 *status, int *active, int *new) +{ + *new = (status[2] & 0x08) ? 1 : 0; + *active = (status[2] & 0x02) ? 
1 : 0; return ((status[0] & 0x3F)<<4) | ((status[1] & 0x78)>>3); } -__inline__ void dlci_to_status(hdlc_device *hdlc, u16 dlci, u8 *status, +__inline__ void dlci_to_status(u16 dlci, u8 *status, int active, int new) { status[0] = (dlci>>4) & 0x3F; @@ -66,37 +133,50 @@ -static int fr_hard_header(struct sk_buff *skb, struct net_device *dev, - u16 type, void *daddr, void *saddr, unsigned int len) +static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) { u16 head_len; + struct sk_buff *skb = *skb_p; - if (!daddr) - daddr = dev->broadcast; - -#ifdef CONFIG_HDLC_DEBUG_HARD_HEADER - printk(KERN_DEBUG "%s: fr_hard_header called\n", dev->name); -#endif - - switch(type) { - case ETH_P_IP: + switch(skb->protocol) { + case __constant_ntohs(ETH_P_IP): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IP; break; - case ETH_P_IPV6: + case __constant_ntohs(ETH_P_IPV6): head_len = 4; skb_push(skb, head_len); skb->data[3] = NLPID_IPV6; break; - case LMI_PROTO: + case __constant_ntohs(LMI_PROTO): head_len = 4; skb_push(skb, head_len); skb->data[3] = LMI_PROTO; break; + case __constant_ntohs(ETH_P_802_3): + head_len = 10; + if (skb_headroom(skb) < head_len) { + struct sk_buff *skb2 = skb_realloc_headroom(skb, + head_len); + if (!skb2) + return -ENOBUFS; + dev_kfree_skb(skb); + skb = *skb_p = skb2; + } + skb_push(skb, head_len); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; + skb->data[5] = FR_PAD; + skb->data[6] = 0x80; + skb->data[7] = 0xC2; + skb->data[8] = 0x00; + skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ + break; + default: head_len = 10; skb_push(skb, head_len); @@ -105,14 +185,12 @@ skb->data[5] = FR_PAD; skb->data[6] = FR_PAD; skb->data[7] = FR_PAD; - skb->data[8] = type>>8; - skb->data[9] = (u8)type; + *(u16*)(skb->data + 8) = skb->protocol; } - memcpy(skb->data, daddr, 2); + dlci_to_q922(skb->data, dlci); skb->data[2] = FR_UI; - - return head_len; + return 0; } @@ -124,13 +202,12 @@ if ((hdlc_to_dev(pvc->master)->flags & IFF_UP) == 0) return -EIO; /* Master must be UP in order to activate PVC */ - if (pvc->master->state.fr.settings.lmi != LMI_NONE) - pvc->state.active = 0; - else - pvc->state.active = 1; + if (pvc->open_count++ == 0) { + if (pvc->master->state.fr.settings.lmi == LMI_NONE) + pvc->state.active = 1; - pvc->state.new = 0; - pvc->master->state.fr.changed = 1; + pvc->master->state.fr.dce_changed = 1; + } return 0; } @@ -139,38 +216,94 @@ static int pvc_close(struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); - pvc->state.active = pvc->state.new = 0; - pvc->master->state.fr.changed = 1; + + if (--pvc->open_count == 0) { + if (pvc->master->state.fr.settings.lmi == LMI_NONE) + pvc->state.active = 0; + + if (pvc->master->state.fr.settings.dce) { + pvc->master->state.fr.dce_changed = 1; + pvc->state.active = 0; + } + } return 0; } -static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) +int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { pvc_device *pvc = dev_to_pvc(dev); + fr_proto_pvc_info info; - if (pvc->state.active) { - skb->dev = hdlc_to_dev(pvc->master); - pvc->stats.tx_bytes += skb->len; - pvc->stats.tx_packets++; - if (pvc->state.fecn) - pvc->stats.tx_compressed++; /* TX Congestion counter */ - dev_queue_xmit(skb); - } else { - pvc->stats.tx_dropped++; - dev_kfree_skb(skb); + if (ifr->ifr_settings.type == IF_GET_PROTO) { + if (dev->type == ARPHRD_ETHER) + ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; + else + ifr->ifr_settings.type = IF_PROTO_FR_PVC; + + if (ifr->ifr_settings.size < sizeof(info)) { + /* data 
size wanted */ + ifr->ifr_settings.size = sizeof(info); + return -ENOBUFS; + } + + info.dlci = pvc->dlci; + memcpy(info.master, hdlc_to_name(pvc->master), IFNAMSIZ); + if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, + &info, sizeof(info))) + return -EFAULT; + return 0; } - return 0; + return -EINVAL; +} + + +__inline__ struct net_device_stats *pvc_get_stats(struct net_device *dev) +{ + return (struct net_device_stats *) + ((char *)dev + sizeof(struct net_device)); } -static struct net_device_stats *pvc_get_stats(struct net_device *dev) +static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); - return &pvc->stats; + struct net_device_stats *stats = pvc_get_stats(dev); + + if (pvc->state.active) { + if (dev->type == ARPHRD_ETHER) { + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, + GFP_ATOMIC)) { + stats->tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + skb->protocol = __constant_htons(ETH_P_802_3); + } + if (!fr_hard_header(&skb, pvc->dlci)) { + stats->tx_bytes += skb->len; + stats->tx_packets++; + if (pvc->state.fecn) /* TX Congestion counter */ + stats->tx_compressed++; + skb->dev = hdlc_to_dev(pvc->master); + dev_queue_xmit(skb); + return 0; + } + } + + stats->tx_dropped++; + dev_kfree_skb(skb); + return 0; } @@ -187,9 +320,15 @@ static inline void fr_log_dlci_active(pvc_device *pvc) { - printk(KERN_INFO "%s: %sactive%s\n", pvc_to_name(pvc), - pvc->state.active ? "" : "in", - pvc->state.new ? " new" : ""); + printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", + hdlc_to_name(pvc->master), + pvc->dlci, + pvc->main ? pvc->main->name : "", + pvc->main && pvc->ether ? " " : "", + pvc->ether ? pvc->ether->name : "", + pvc->state.new ? " new" : "", + !pvc->state.exist ? "deleted" : + pvc->state.active ? "active" : "inactive"); } @@ -213,8 +352,8 @@ int i = 0; if (hdlc->state.fr.settings.dce && fullrep) { - len += hdlc->state.fr.pvc_count * (2 + stat_len); - if (len > HDLC_MAX_MTU) { + len += hdlc->state.fr.dce_pvc_count * (2 + stat_len); + if (len > HDLC_MAX_MRU) { printk(KERN_WARNING "%s: Too many PVCs while sending " "LMI full report\n", hdlc_to_name(hdlc)); return; @@ -224,12 +363,13 @@ skb = dev_alloc_skb(len); if (!skb) { printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n", - hdlc_to_name(hdlc)); + hdlc_to_name(hdlc)); return; } memset(skb->data, 0, len); skb_reserve(skb, 4); - fr_hard_header(skb, hdlc_to_dev(hdlc), LMI_PROTO, NULL, NULL, 0); + skb->protocol = __constant_htons(LMI_PROTO); + fr_hard_header(&skb, LMI_DLCI); data = skb->tail; data[i++] = LMI_CALLREF; data[i++] = hdlc->state.fr.settings.dce @@ -253,16 +393,20 @@ ? 
LMI_CCITT_PVCSTAT : LMI_PVCSTAT; data[i++] = stat_len; - if (hdlc->state.fr.reliable && - (pvc->netdev.flags & IFF_UP) && - !pvc->state.active && - !pvc->state.new) { - pvc->state.new = 1; + /* LMI start/restart */ + if (hdlc->state.fr.reliable && !pvc->state.exist) { + pvc->state.exist = pvc->state.new = 1; + fr_log_dlci_active(pvc); + } + + /* ifconfig PVC up */ + if (pvc->open_count && !pvc->state.active && + pvc->state.exist && !pvc->state.new) { + pvc->state.active = 1; fr_log_dlci_active(pvc); } - dlci_to_status(hdlc, netdev_dlci(&pvc->netdev), - data + i, + dlci_to_status(pvc->dlci, data + i, pvc->state.active, pvc->state.new); i += stat_len; pvc = pvc->next; @@ -272,6 +416,7 @@ skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = hdlc_to_dev(hdlc); + skb->nh.raw = skb->data; dev_queue_xmit(skb); } @@ -312,10 +457,11 @@ if (reliable) { hdlc->state.fr.n391cnt = 0; /* Request full status */ - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; } else { while (pvc) { /* Deactivate all PVCs */ - pvc->state.new = pvc->state.active = 0; + pvc->state.exist = 0; + pvc->state.active = pvc->state.new = 0; pvc = pvc->next; } } @@ -346,7 +492,7 @@ { int stat_len; pvc_device *pvc; - int reptype = -1, error; + int reptype = -1, error, no_ram; u8 rxseq, txseq; int i; @@ -420,20 +566,18 @@ while (pvc) { if (pvc->state.new) { pvc->state.new = 0; - pvc->state.active = 1; - fr_log_dlci_active(pvc); /* Tell DTE that new PVC is now active */ - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; } pvc = pvc->next; } } - if (hdlc->state.fr.changed) { + if (hdlc->state.fr.dce_changed) { reptype = LMI_FULLREP; hdlc->state.fr.fullrep_sent = 1; - hdlc->state.fr.changed = 0; + hdlc->state.fr.dce_changed = 0; } fr_lmi_send(hdlc, reptype == LMI_FULLREP ? 1 : 0); @@ -449,13 +593,14 @@ pvc = hdlc->state.fr.first_pvc; while (pvc) { - pvc->state.deleted = pvc->state.active; /* mark active PVCs */ + pvc->state.deleted = 1; pvc = pvc->next; } + no_ram = 0; while (skb->len >= i + 2 + stat_len) { u16 dlci; - int active, new; + unsigned int active, new; if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT) ? 
LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) { @@ -472,21 +617,28 @@ } i++; - dlci = status_to_dlci(hdlc, skb->data + i, &active, &new); - pvc = find_pvc(hdlc, dlci); + dlci = status_to_dlci(skb->data + i, &active, &new); + + pvc = add_pvc(hdlc, dlci); + + if (!pvc && !no_ram) { + printk(KERN_WARNING + "%s: Memory squeeze on fr_lmi_recv()\n", + hdlc_to_name(hdlc)); + no_ram = 1; + } - active |= new; if (pvc) { - if (active && !pvc->state.active && - (pvc->netdev.flags & IFF_UP)) { + pvc->state.exist = 1; + pvc->state.deleted = 0; + if (active != pvc->state.active || + new != pvc->state.new || + !pvc->state.exist) { + pvc->state.new = new; pvc->state.active = active; fr_log_dlci_active(pvc); } - pvc->state.deleted = 0; } - else if (new) - printk(KERN_INFO "%s: new PVC available, DLCI=%u\n", - hdlc_to_name(hdlc), dlci); i += stat_len; } @@ -494,10 +646,10 @@ pvc = hdlc->state.fr.first_pvc; while (pvc) { - if (pvc->state.deleted) { + if (pvc->state.deleted && pvc->state.exist) { pvc->state.active = pvc->state.new = 0; + pvc->state.exist = 0; fr_log_dlci_active(pvc); - pvc->state.deleted = 0; } pvc = pvc->next; } @@ -517,8 +669,9 @@ u8 *data = skb->data; u16 dlci; pvc_device *pvc; + struct net_device *dev = NULL; - if (skb->len<4 || fh->ea1 || data[2] != FR_UI) + if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) goto rx_error; dlci = q922_to_dlci(skb->data); @@ -550,57 +703,39 @@ printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n", hdlc_to_name(hdlc), dlci); #endif - goto rx_error; - } - - if ((pvc->netdev.flags & IFF_UP) == 0) { -#ifdef CONFIG_HDLC_DEBUG_PKT - printk(KERN_INFO "%s: PVC for received frame's DLCI %d is down\n", - hdlc_to_name(hdlc), dlci); -#endif - goto rx_error; + dev_kfree_skb_any(skb); + return; } - pvc->stats.rx_packets++; /* PVC traffic */ - pvc->stats.rx_bytes += skb->len; - - if (pvc->state.fecn != (fh->fecn ? PVC_STATE_FECN : 0)) { + if (pvc->state.fecn != fh->fecn) { #ifdef CONFIG_HDLC_DEBUG_ECN - printk(KERN_DEBUG "%s: FECN O%s\n", pvc_to_name(pvc), - fh->fecn ? "N" : "FF"); + printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", hdlc_to_name(pvc), + dlci, fh->fecn ? "N" : "FF"); #endif pvc->state.fecn ^= 1; } - if (pvc->state.becn != (fh->becn ? PVC_STATE_BECN : 0)) { + if (pvc->state.becn != fh->becn) { #ifdef CONFIG_HDLC_DEBUG_ECN - printk(KERN_DEBUG "%s: BECN O%s\n", pvc_to_name(pvc), - fh->becn ? "N" : "FF"); + printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", hdlc_to_name(pvc), + dlci, fh->becn ? 
"N" : "FF"); #endif pvc->state.becn ^= 1; } - if (pvc->state.becn) - pvc->stats.rx_compressed++; - - skb->dev = &pvc->netdev; if (data[3] == NLPID_IP) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + dev = pvc->main; skb->protocol = htons(ETH_P_IP); - netif_rx(skb); - return; - } - - if (data[3] == NLPID_IPV6) { + } else if (data[3] == NLPID_IPV6) { skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ + dev = pvc->main; skb->protocol = htons(ETH_P_IPV6); - netif_rx(skb); - return; - } - if (data[3] == FR_PAD && data[4] == NLPID_SNAP && data[5] == FR_PAD) { + } else if (skb->len > 10 && data[3] == FR_PAD && + data[4] == NLPID_SNAP && data[5] == FR_PAD) { u16 oui = ntohs(*(u16*)(data + 6)); u16 pid = ntohs(*(u16*)(data + 8)); skb_pull(skb, 10); @@ -610,23 +745,39 @@ case ETH_P_IPX: case ETH_P_IP: /* a long variant */ case ETH_P_IPV6: + dev = pvc->main; skb->protocol = htons(pid); break; + case 0x80C20007: /* bridged Ethernet frame */ + if ((dev = pvc->ether) != NULL) + skb->protocol = eth_type_trans(skb, dev); + break; + default: printk(KERN_INFO "%s: Unsupported protocol, OUI=%x " "PID=%x\n", hdlc_to_name(hdlc), oui, pid); dev_kfree_skb_any(skb); return; } - - netif_rx(skb); + } else { + printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x " + "length = %i\n", hdlc_to_name(hdlc), data[3], skb->len); + dev_kfree_skb_any(skb); return; } - printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x\n", - hdlc_to_name(hdlc), data[3]); - dev_kfree_skb_any(skb); + if (dev) { + struct net_device_stats *stats = pvc_get_stats(dev); + stats->rx_packets++; /* PVC traffic */ + stats->rx_bytes += skb->len; + if (pvc->state.becn) + stats->rx_compressed++; + skb->dev = dev; + netif_rx(skb); + } else + dev_kfree_skb_any(skb); + return; rx_error: @@ -641,7 +792,7 @@ if (hdlc->state.fr.settings.lmi != LMI_NONE) { hdlc->state.fr.last_poll = 0; hdlc->state.fr.reliable = 0; - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_changed = 1; hdlc->state.fr.request = 0; hdlc->state.fr.fullrep_sent = 0; hdlc->state.fr.last_errors = 0xFFFFFFFF; @@ -669,90 +820,119 @@ if (hdlc->state.fr.settings.lmi != LMI_NONE) del_timer_sync(&hdlc->state.fr.timer); - while(pvc) { - dev_close(&pvc->netdev); /* Shutdown all PVCs for this FRAD */ + while(pvc) { /* Shutdown all PVCs for this FRAD */ + if (pvc->main) + dev_close(pvc->main); + if (pvc->ether) + dev_close(pvc->ether); + pvc->state.active = pvc->state.new = pvc->state.fecn = + pvc->state.becn = 0; + pvc->state.exist = 0; pvc = pvc->next; } } - -static int fr_pvc(hdlc_device *hdlc, unsigned int dlci, int create) +static int fr_add_pvc(hdlc_device *hdlc, unsigned int dlci, int type) { - pvc_device **pvc_p = &hdlc->state.fr.first_pvc; - pvc_device *pvc; - int result; + pvc_device *pvc = NULL; + struct net_device *dev; + int result, used; + char * prefix = "pvc%d"; - if(dlci <= 0 || dlci >= 1024) - return -EINVAL; /* Only 10 bits for DLCI, DLCI 0 reserved */ + if (type == ARPHRD_ETHER) + prefix = "pvceth%d"; - while(*pvc_p) { - if (netdev_dlci(&(*pvc_p)->netdev) == dlci) - break; - pvc_p = &(*pvc_p)->next; + if ((pvc = add_pvc(hdlc, dlci)) == NULL) { + printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n", + hdlc_to_name(hdlc)); + return -ENOBUFS; } - if (create) { /* Create PVC */ - if (*pvc_p != NULL) - return -EEXIST; - - pvc = *pvc_p = kmalloc(sizeof(pvc_device), GFP_KERNEL); - if (!pvc) { - printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", - hdlc_to_name(hdlc)); - return -ENOBUFS; - } - memset(pvc, 0, sizeof(pvc_device)); + if (*get_dev_p(pvc, 
type)) + return -EEXIST; - pvc->netdev.hard_start_xmit = pvc_xmit; - pvc->netdev.get_stats = pvc_get_stats; - pvc->netdev.open = pvc_open; - pvc->netdev.stop = pvc_close; - pvc->netdev.change_mtu = pvc_change_mtu; - pvc->netdev.mtu = HDLC_MAX_MTU; - - pvc->netdev.type = ARPHRD_DLCI; - pvc->netdev.hard_header_len = 16; - pvc->netdev.hard_header = fr_hard_header; - pvc->netdev.tx_queue_len = 0; - pvc->netdev.flags = IFF_POINTOPOINT; - - pvc->master = hdlc; - *(u16*)pvc->netdev.dev_addr = htons(dlci); - dlci_to_q922(pvc->netdev.broadcast, dlci); - pvc->netdev.addr_len = 2; + used = pvc_is_used(pvc); - result = dev_alloc_name(&pvc->netdev, "pvc%d"); - if (result < 0) { - kfree(pvc); - *pvc_p = NULL; - return result; - } - - if (register_netdevice(&pvc->netdev) != 0) { - kfree(pvc); - *pvc_p = NULL; - return -EIO; - } + dev = kmalloc(sizeof(struct net_device) + + sizeof(struct net_device_stats), GFP_KERNEL); + if (!dev) { + printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", + hdlc_to_name(hdlc)); + delete_unused_pvcs(hdlc); + return -ENOBUFS; + } + memset(dev, 0, sizeof(struct net_device) + + sizeof(struct net_device_stats)); - hdlc->state.fr.changed = 1; - hdlc->state.fr.pvc_count++; - return 0; + if (type == ARPHRD_ETHER) { + ether_setup(dev); + memcpy(dev->dev_addr, "\x00\x01", 2); + get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + } else { + dev->type = ARPHRD_DLCI; + dev->flags = IFF_POINTOPOINT; + dev->hard_header_len = 10; + dev->addr_len = 2; + *(u16*)dev->dev_addr = htons(dlci); + dlci_to_q922(dev->broadcast, dlci); + } + dev->hard_start_xmit = pvc_xmit; + dev->get_stats = pvc_get_stats; + dev->open = pvc_open; + dev->stop = pvc_close; + dev->do_ioctl = pvc_ioctl; + dev->change_mtu = pvc_change_mtu; + dev->mtu = HDLC_MAX_MTU; + dev->tx_queue_len = 0; + dev->priv = pvc; + + result = dev_alloc_name(dev, prefix); + if (result < 0) { + kfree(dev); + delete_unused_pvcs(hdlc); + return result; + } + + if (register_netdevice(dev) != 0) { + kfree(dev); + delete_unused_pvcs(hdlc); + return -EIO; + } + + *get_dev_p(pvc, type) = dev; + if (!used) { + hdlc->state.fr.dce_changed = 1; + hdlc->state.fr.dce_pvc_count++; } + return 0; +} + + - if (*pvc_p == NULL) /* Delete PVC */ +static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) +{ + pvc_device *pvc; + struct net_device *dev; + + if ((pvc = find_pvc(hdlc, dlci)) == NULL) return -ENOENT; - pvc = *pvc_p; + if ((dev = *get_dev_p(pvc, type)) == NULL) + return -ENOENT; - if (pvc->netdev.flags & IFF_UP) + if (dev->flags & IFF_UP) return -EBUSY; /* PVC in use */ - hdlc->state.fr.changed = 1; - hdlc->state.fr.pvc_count--; - *pvc_p = pvc->next; - unregister_netdevice(&pvc->netdev); - kfree(pvc); + unregister_netdevice(dev); + kfree(dev); + *get_dev_p(pvc, type) = NULL; + + if (!pvc_is_used(pvc)) { + hdlc->state.fr.dce_pvc_count--; + hdlc->state.fr.dce_changed = 1; + } + delete_unused_pvcs(hdlc); return 0; } @@ -763,14 +943,21 @@ pvc_device *pvc = hdlc->state.fr.first_pvc; while(pvc) { pvc_device *next = pvc->next; - unregister_netdev(&pvc->netdev); + if (pvc->main) { + unregister_netdevice(pvc->main); + kfree(pvc->main); + } + if (pvc->ether) { + unregister_netdevice(pvc->ether); + kfree(pvc->ether); + } kfree(pvc); pvc = next; } hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */ - hdlc->state.fr.pvc_count = 0; - hdlc->state.fr.changed = 1; + hdlc->state.fr.dce_pvc_count = 0; + hdlc->state.fr.dce_changed = 1; } @@ -828,25 +1015,27 @@ if (hdlc->proto != IF_PROTO_FR) { hdlc_proto_detach(hdlc); hdlc->state.fr.first_pvc = NULL; 
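
The literal bytes that fr_hard_header() writes for ARPHRD_ETHER PVCs earlier in this file's diff (pad 0x00, NLPID_SNAP 0x80, OUI 00-80-C2, PID 0x00 0x07), and that fr_rx() matches as 0x80C20007, are the RFC 2427 (formerly RFC 1490) SNAP encapsulation for bridged Ethernet without preserved FCS. A hedged sketch of the resulting 10-byte header; the struct and field names are illustrative only, since the driver builds it byte by byte:

    struct fr_bridged_eth_hdr {             /* illustration, not from the patch */
            u8 q922[2];     /* 10-bit DLCI plus C/R, FECN, BECN, DE, EA bits */
            u8 control;     /* FR_UI (0x03), unnumbered information */
            u8 pad;         /* 0x00 */
            u8 nlpid;       /* NLPID_SNAP (0x80) */
            u8 oui[3];      /* 00-80-C2 (IEEE 802.1) */
            u8 pid[2];      /* 0x00 0x07: bridged Ethernet, FCS not preserved */
    } __attribute__((packed));              /* 10 bytes == head_len above */
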
- hdlc->state.fr.pvc_count = 0; + hdlc->state.fr.dce_pvc_count = 0; } memcpy(&hdlc->state.fr.settings, &new_settings, size); hdlc->open = fr_open; hdlc->stop = fr_close; hdlc->netif_rx = fr_rx; + hdlc->type_trans = NULL; hdlc->proto_detach = fr_destroy; hdlc->proto = IF_PROTO_FR; dev->hard_start_xmit = hdlc->xmit; - dev->hard_header = fr_hard_header; + dev->hard_header = NULL; dev->type = ARPHRD_FRAD; - dev->addr_len = 2; - *(u16*)dev->dev_addr = htons(LMI_DLCI); - dlci_to_q922(dev->broadcast, LMI_DLCI); + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->addr_len = 0; return 0; case IF_PROTO_FR_ADD_PVC: case IF_PROTO_FR_DEL_PVC: + case IF_PROTO_FR_ADD_ETH_PVC: + case IF_PROTO_FR_DEL_ETH_PVC: if(!capable(CAP_NET_ADMIN)) return -EPERM; @@ -854,8 +1043,20 @@ sizeof(fr_proto_pvc))) return -EFAULT; - return fr_pvc(hdlc, pvc.dlci, - ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC); + if (pvc.dlci <= 0 || pvc.dlci >= 1024) + return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ + + if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || + ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) + result = ARPHRD_ETHER; /* bridged Ethernet device */ + else + result = ARPHRD_DLCI; + + if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || + ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) + return fr_add_pvc(hdlc, pvc.dlci, result); + else + return fr_del_pvc(hdlc, pvc.dlci, result); } return -EINVAL; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_generic.c linux.21pre4-ac2/drivers/net/wan/hdlc_generic.c --- linux.21pre4/drivers/net/wan/hdlc_generic.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_generic.c 2003-01-28 16:39:15.000000000 +0000 @@ -1,17 +1,13 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
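
The new IF_PROTO_FR_ADD_ETH_PVC / IF_PROTO_FR_DEL_ETH_PVC requests above reuse the existing fr_proto_pvc argument and merely select ARPHRD_ETHER instead of ARPHRD_DLCI, so a bridged-Ethernet PVC registers as pvceth<N>. A hedged userspace sketch of issuing the add request through the generic HDLC SIOCWANDEV ioctl (in practice sethdlc does this; add_eth_pvc is an assumed name and header locations vary between kernel versions):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/hdlc.h>
    #include <linux/sockios.h>

    /* Create a bridged-Ethernet PVC on the Frame Relay master "hdlc0". */
    static int add_eth_pvc(int sock, unsigned int dlci)
    {
            struct ifreq ifr;
            fr_proto_pvc pvc;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);

            pvc.dlci = dlci;                        /* valid range 1..1023 */
            ifr.ifr_settings.type = IF_PROTO_FR_ADD_ETH_PVC;
            ifr.ifr_settings.size = sizeof(pvc);
            ifr.ifr_settings.ifs_ifsu.fr_pvc = &pvc;

            return ioctl(sock, SIOCWANDEV, &ifr);   /* registers pvceth<N> */
    }
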
* - * Current status: - * - this is work in progress - * - not heavily tested on SMP - * - currently supported: + * Currently supported: * * raw IP-in-HDLC * * Cisco HDLC * * Frame Relay with ANSI or CCITT LMI (both user and network side) @@ -37,7 +33,7 @@ #include -static const char* version = "HDLC support module revision 1.11"; +static const char* version = "HDLC support module revision 1.12"; static int hdlc_change_mtu(struct net_device *dev, int new_mtu) @@ -60,7 +56,13 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p) { - dev_to_hdlc(dev)->netif_rx(skb); + hdlc_device *hdlc = dev_to_hdlc(dev); + if (hdlc->netif_rx) + hdlc->netif_rx(skb); + else { + hdlc->stats.rx_dropped++; /* Shouldn't happen */ + dev_kfree_skb(skb); + } return 0; } @@ -69,6 +71,10 @@ #define hdlc_raw_ioctl(hdlc, ifr) -ENOSYS #endif +#ifndef CONFIG_HDLC_RAW_ETH +#define hdlc_raw_eth_ioctl(hdlc, ifr) -ENOSYS +#endif + #ifndef CONFIG_HDLC_PPP #define hdlc_ppp_ioctl(hdlc, ifr) -ENOSYS #endif @@ -96,6 +102,7 @@ switch(ifr->ifr_settings.type) { case IF_PROTO_HDLC: + case IF_PROTO_HDLC_ETH: case IF_PROTO_PPP: case IF_PROTO_CISCO: case IF_PROTO_FR: @@ -109,6 +116,7 @@ switch(proto) { case IF_PROTO_HDLC: return hdlc_raw_ioctl(hdlc, ifr); + case IF_PROTO_HDLC_ETH: return hdlc_raw_eth_ioctl(hdlc, ifr); case IF_PROTO_PPP: return hdlc_ppp_ioctl(hdlc, ifr); case IF_PROTO_CISCO: return hdlc_cisco_ioctl(hdlc, ifr); case IF_PROTO_FR: return hdlc_fr_ioctl(hdlc, ifr); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_ppp.c linux.21pre4-ac2/drivers/net/wan/hdlc_ppp.c --- linux.21pre4/drivers/net/wan/hdlc_ppp.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_ppp.c 2003-01-28 16:39:15.000000000 +0000 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * Point-to-point protocol support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
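
Protocols that now install a type_trans hook instead of a netif_rx handler (raw HDLC, PPP, Cisco, and the new Ethernet emulation) depend on the hardware driver calling hdlc_type_trans() at receive time, as the dscc4, farsync and hd6457x hunks above already show; that is why hdlc_rcv() must tolerate a NULL netif_rx. The inline itself belongs to the include/linux/hdlc.h part of this patch and is not visible in this excerpt; the following is only a plausible sketch of what the drivers rely on and may differ from the real definition:

    static inline unsigned short hdlc_type_trans(struct sk_buff *skb,
                                                 struct net_device *dev)
    {
            hdlc_device *hdlc = dev_to_hdlc(dev);

            skb->mac.raw = skb->data;
            skb->dev = dev;

            if (hdlc->type_trans)
                    return hdlc->type_trans(skb, dev); /* e.g. cisco_type_trans */
            return htons(ETH_P_HDLC);
    }
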
*/ #include @@ -68,10 +67,10 @@ -static void ppp_rx(struct sk_buff *skb) +static unsigned short ppp_type_trans(struct sk_buff *skb, + struct net_device *dev) { - skb->protocol = htons(ETH_P_WAN_PPP); - netif_rx(skb); + return __constant_htons(ETH_P_WAN_PPP); } @@ -103,7 +102,8 @@ hdlc->open = ppp_open; hdlc->stop = ppp_close; - hdlc->netif_rx = ppp_rx; + hdlc->netif_rx = NULL; + hdlc->type_trans = ppp_type_trans; hdlc->proto = IF_PROTO_PPP; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = NULL; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_raw.c linux.21pre4-ac2/drivers/net/wan/hdlc_raw.c --- linux.21pre4/drivers/net/wan/hdlc_raw.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_raw.c 2003-01-28 16:39:15.000000000 +0000 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * HDLC support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. */ #include @@ -26,10 +25,10 @@ #include -static void raw_rx(struct sk_buff *skb) +static unsigned short raw_type_trans(struct sk_buff *skb, + struct net_device *dev) { - skb->protocol = htons(ETH_P_IP); - netif_rx(skb); + return __constant_htons(ETH_P_IP); } @@ -67,7 +66,7 @@ new_settings.encoding = ENCODING_NRZ; if (new_settings.parity == PARITY_DEFAULT) - new_settings.parity = PARITY_NONE; + new_settings.parity = PARITY_CRC16_PR1_CCITT; result = hdlc->attach(hdlc, new_settings.encoding, new_settings.parity); @@ -79,11 +78,13 @@ hdlc->open = NULL; hdlc->stop = NULL; - hdlc->netif_rx = raw_rx; + hdlc->netif_rx = NULL; + hdlc->type_trans = raw_type_trans; hdlc->proto = IF_PROTO_HDLC; dev->hard_start_xmit = hdlc->xmit; dev->hard_header = NULL; dev->type = ARPHRD_RAWHDLC; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->addr_len = 0; return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_raw_eth.c linux.21pre4-ac2/drivers/net/wan/hdlc_raw_eth.c --- linux.21pre4/drivers/net/wan/hdlc_raw_eth.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_raw_eth.c 2003-01-28 16:39:15.000000000 +0000 @@ -0,0 +1,110 @@ +/* + * Generic HDLC support routines for Linux + * HDLC Ethernet emulation support + * + * Copyright (C) 2002-2003 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int eth_tx(struct sk_buff *skb, struct net_device *dev) +{ + int pad = ETH_ZLEN - skb->len; + if (pad > 0) { /* Pad the frame with zeros */ + int len = skb->len; + if (skb_tailroom(skb) < pad) + if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) { + dev_to_hdlc(dev)->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } + return dev_to_hdlc(dev)->xmit(skb, dev); +} + + +int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr) +{ + raw_hdlc_proto *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc; + const size_t size = sizeof(raw_hdlc_proto); + raw_hdlc_proto new_settings; + struct net_device *dev = hdlc_to_dev(hdlc); + int result; + void *old_ch_mtu; + int old_qlen; + + switch (ifr->ifr_settings.type) { + case IF_GET_PROTO: + ifr->ifr_settings.type = IF_PROTO_HDLC_ETH; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size)) + return -EFAULT; + return 0; + + case IF_PROTO_HDLC_ETH: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (dev->flags & IFF_UP) + return -EBUSY; + + if (copy_from_user(&new_settings, raw_s, size)) + return -EFAULT; + + if (new_settings.encoding == ENCODING_DEFAULT) + new_settings.encoding = ENCODING_NRZ; + + if (new_settings.parity == PARITY_DEFAULT) + new_settings.parity = PARITY_CRC16_PR1_CCITT; + + result = hdlc->attach(hdlc, new_settings.encoding, + new_settings.parity); + if (result) + return result; + + hdlc_proto_detach(hdlc); + memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size); + + hdlc->open = NULL; + hdlc->stop = NULL; + hdlc->netif_rx = NULL; + hdlc->type_trans = eth_type_trans; + hdlc->proto = IF_PROTO_HDLC_ETH; + dev->hard_start_xmit = eth_tx; + old_ch_mtu = dev->change_mtu; + old_qlen = dev->tx_queue_len; + ether_setup(dev); + dev->change_mtu = old_ch_mtu; + dev->tx_queue_len = old_qlen; + memcpy(dev->dev_addr, "\x00\x01", 2); + get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); + return 0; + } + + return -EINVAL; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/hdlc_x25.c linux.21pre4-ac2/drivers/net/wan/hdlc_x25.c --- linux.21pre4/drivers/net/wan/hdlc_x25.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/hdlc_x25.c 2003-01-28 16:39:15.000000000 +0000 @@ -2,12 +2,11 @@ * Generic HDLC support routines for Linux * X.25 support * - * Copyright (C) 1999 - 2001 Krzysztof Halasa + * Copyright (C) 1999 - 2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
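
hdlc_raw_eth_ioctl() above is reached through the same SIOCWANDEV path when userspace asks for IF_PROTO_HDLC_ETH, turning a synchronous serial port into an Ethernet-like interface with a 00:01:xx:xx:xx:xx station address generated at attach time. A hedged userspace sketch of the request (attach_hdlc_eth is an assumed name; normally sethdlc issues this, and header locations vary between kernel versions):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/hdlc.h>
    #include <linux/sockios.h>

    static int attach_hdlc_eth(int sock, const char *ifname)
    {
            struct ifreq ifr;
            raw_hdlc_proto proto;

            memset(&ifr, 0, sizeof(ifr));
            memset(&proto, 0, sizeof(proto));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

            proto.encoding = ENCODING_DEFAULT;  /* kernel picks NRZ */
            proto.parity = PARITY_DEFAULT;      /* becomes CRC16-PR1-CCITT above */

            ifr.ifr_settings.type = IF_PROTO_HDLC_ETH;
            ifr.ifr_settings.size = sizeof(proto);
            ifr.ifr_settings.ifs_ifsu.raw_hdlc = &proto;

            return ioctl(sock, SIOCWANDEV, &ifr);
    }
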
*/ #include @@ -204,6 +203,7 @@ hdlc->open = x25_open; hdlc->stop = x25_close; hdlc->netif_rx = x25_rx; + hdlc->type_trans = NULL; hdlc->proto = IF_PROTO_X25; dev->hard_start_xmit = x25_xmit; dev->hard_header = NULL; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wan/Makefile linux.21pre4-ac2/drivers/net/wan/Makefile --- linux.21pre4/drivers/net/wan/Makefile 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wan/Makefile 2003-01-28 16:39:15.000000000 +0000 @@ -24,6 +24,7 @@ hdlc-y := hdlc_generic.o hdlc-$(CONFIG_HDLC_RAW) += hdlc_raw.o +hdlc-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o hdlc-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o hdlc-$(CONFIG_HDLC_FR) += hdlc_fr.o hdlc-$(CONFIG_HDLC_PPP) += hdlc_ppp.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/wavelan.c linux.21pre4-ac2/drivers/net/wavelan.c --- linux.21pre4/drivers/net/wavelan.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/wavelan.c 2003-01-06 23:13:06.000000000 +0000 @@ -2825,6 +2825,13 @@ (unsigned) skb); #endif + if(skb->len < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + } + /* * Block a timer-based transmit from overlapping. * In other words, prevent reentering this routine. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/yellowfin.c linux.21pre4-ac2/drivers/net/yellowfin.c --- linux.21pre4/drivers/net/yellowfin.c 2003-01-29 16:26:41.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/yellowfin.c 2003-01-06 15:38:15.000000000 +0000 @@ -857,6 +857,7 @@ { struct yellowfin_private *yp = dev->priv; unsigned entry; + int len = skb->len; netif_stop_queue (dev); @@ -872,27 +873,37 @@ int cacheline_end = ((unsigned long)skb->data + skb->len) % 32; /* Fix GX chipset errata. */ if (cacheline_end > 24 || cacheline_end == 0) - skb->len += 32 - cacheline_end + 1; + { + len = skb->len + 32 - cacheline_end + 1; + if(len != skb->len) + skb = skb_padto(skb, len); + } + if(skb == NULL) + { + yp->tx_skbuff[entry] = NULL; + netif_wake_queue(dev); + return 0; + } } #ifdef NO_TXSTATS yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, - skb->data, skb->len, PCI_DMA_TODEVICE)); + skb->data, len, PCI_DMA_TODEVICE)); yp->tx_ring[entry].result_status = 0; if (entry >= TX_RING_SIZE-1) { /* New stop command. */ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd = - cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len); + cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len); } else { yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[entry].dbdma_cmd = - cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len); + cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len); } yp->cur_tx++; #else - yp->tx_ring[entry<<1].request_cnt = skb->len; + yp->tx_ring[entry<<1].request_cnt = len; yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev, - skb->data, skb->len, PCI_DMA_TODEVICE)); + skb->data, len, PCI_DMA_TODEVICE)); /* The input_last (status-write) command is constant, but we must rewrite the subsequent 'stop' command. */ @@ -905,7 +916,7 @@ yp->tx_ring[entry<<1].dbdma_cmd = cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE : - CMD_TX_PKT | BRANCH_IFTRUE) | skb->len); + CMD_TX_PKT | BRANCH_IFTRUE) | len); #endif /* Non-x86 Todo: explicitly flush cache lines here. 
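
A worked example of the yellowfin GX-errata path above: if skb->data + skb->len ends 30 bytes into a 32-byte cache line, cacheline_end is 30 (greater than 24), so the transmit length becomes skb->len + 32 - 30 + 1 = skb->len + 3, carrying the DMA burst past the cache-line boundary. The old code only enlarged skb->len without providing those bytes; going through skb_padto() ensures the padding actually exists (and is zeroed) before the length is written into the descriptor.
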
*/ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/net/znet.c linux.21pre4-ac2/drivers/net/znet.c --- linux.21pre4/drivers/net/znet.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/drivers/net/znet.c 2003-01-06 15:38:15.000000000 +0000 @@ -348,10 +348,19 @@ int ioaddr = dev->base_addr; struct net_local *lp = (struct net_local *)dev->priv; unsigned long flags; + short length = skb->len; if (znet_debug > 4) printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name); + if(length < ETH_ZLEN) + { + skb = skb_padto(skb, ETH_ZLEN); + if(skb == NULL) + return 0; + length = ETH_ZLEN; + } + netif_stop_queue (dev); /* Check that the part hasn't reset itself, probably from suspend. */ @@ -362,7 +371,6 @@ hardware_init(dev); if (1) { - short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; unsigned char *buf = (void *)skb->data; ushort *tx_link = zn.tx_cur - 1; ushort rnd_len = (length + 1)>>1; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/pci/pci.ids linux.21pre4-ac2/drivers/pci/pci.ids --- linux.21pre4/drivers/pci/pci.ids 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/pci/pci.ids 2003-01-06 15:38:23.000000000 +0000 @@ -3044,7 +3044,7 @@ f015 NinjaSCSI-32 Melco 1146 Force Computers 1147 Interface Corp -1148 Syskonnect (Schneider & Koch) +1148 SysKonnect 4000 FDDI Adapter 0e11 b03b Netelligent 100 FDDI DAS Fibre SC 0e11 b03c Netelligent 100 FDDI SAS Fibre SC @@ -3062,15 +3062,25 @@ 1148 5843 FDDI SK-5843 (SK-NET FDDI-LP64) 1148 5844 FDDI SK-5844 (SK-NET FDDI-LP64 DAS) 4200 Token Ring adapter - 4300 Gigabit Ethernet - 1148 9821 SK-9821 (1000Base-T single link) - 1148 9822 SK-9822 (1000Base-T dual link) - 1148 9841 SK-9841 (1000Base-LX single link) - 1148 9842 SK-9842 (1000Base-LX dual link) - 1148 9843 SK-9843 (1000Base-SX single link) - 1148 9844 SK-9844 (1000Base-SX dual link) - 1148 9861 SK-9861 (1000Base-SX VF45 single link) - 1148 9862 SK-9862 (1000Base-SX VF45 dual link) + 4300 SK-98xx Gigabit Ethernet Server Adapter + 1148 9821 SK-9821 Gigabit Ethernet 1000Base-T Server Adapter + 1148 9822 SK-9822 Gigabit Ethernet 1000Base-T Dual Port Server Adapter + 1148 9841 SK-9841 Gigabit Ethernet 1000Base-LX Server Adapter + 1148 9842 SK-9842 Gigabit Ethernet 1000Base-LX Dual Port Server Adapter + 1148 9843 SK-9843 Gigabit Ethernet 1000Base-SX Server Adapter + 1148 9844 SK-9844 Gigabit Ethernet 1000Base-SX Dual Port Server Adapter + 1148 9861 SK-9861 Gigabit Ethernet 1000Base-SX Server Adapter + 1148 9862 SK-9862 Gigabit Ethernet 1000Base-SX Dual Port Server Adapter + 1148 9871 SK-9871 Gigabit Ethernet 1000Base-ZX Server Adapter + 1148 9872 SK-9872 Gigabit Ethernet 1000Base-ZX Dual Port Server Adapter + 4320 SK-98xx Gigabit Ethernet Server Adapter + 1148 9521 SK-9521 10/100/1000Base-T Adapter + 1148 5021 SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter + 1148 5041 SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter + 1148 5043 SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter + 1148 5051 SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter + 1148 5061 SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter + 1148 5071 SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter 4400 Gigabit Ethernet 1149 Win System Corporation 114a VMIC diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/pci/quirks.c linux.21pre4-ac2/drivers/pci/quirks.c --- linux.21pre4/drivers/pci/quirks.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/pci/quirks.c 2003-01-31 12:33:14.000000000 +0000 @@ 
-234,7 +234,6 @@ static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev) { printk(KERN_INFO "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb.\n"); - /* Mae rhaid in i beidio a edrych ar y lleoliad I/O hyn */ request_region(0x3b0, 0x0C, "RadeonIGP"); request_region(0x3d3, 0x01, "RadeonIGP"); } @@ -573,6 +572,41 @@ } /* + * Ensure C0 rev restreaming is off. This is normally done by + * the BIOS but in the odd case it is not the results are corruption + * hence the presence of a Linux check + */ + +static void __init quirk_disable_pxb(struct pci_dev *pdev) +{ + u16 config; + u8 rev; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if(rev != 0x04) /* Only C0 requires this */ + return; + pci_read_config_word(pdev, 0x40, &config); + if(config & (1<<6)) + { + config &= ~(1<<6); + pci_write_config_word(pdev, 0x40, config); + printk(KERN_INFO "PCI: C0 revision 450NX. Disabling PCI restreaming.\n"); + } +} + +/* + * VIA northbridges care about PCI_INTERRUPT_LINE + */ + +int interrupt_line_quirk; + +static void __init quirk_via_bridge(struct pci_dev *pdev) +{ + if(pdev->devfn == 0) + interrupt_line_quirk = 1; +} + +/* * The main table of quirks. */ @@ -587,6 +621,7 @@ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs }, { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs }, { PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs }, + { PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M }, { PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton }, @@ -616,6 +651,7 @@ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_2, quirk_piix3_usb }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_2, quirk_piix3_usb }, { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, quirk_ide_bases }, + { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_bridge }, { PCI_FIXUP_FINAL, PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy }, #ifdef CONFIG_X86_IO_APIC diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/pnp/Config.in linux.21pre4-ac2/drivers/pnp/Config.in --- linux.21pre4/drivers/pnp/Config.in 2003-01-29 16:26:56.000000000 +0000 +++ linux.21pre4-ac2/drivers/pnp/Config.in 2003-01-06 15:42:40.000000000 +0000 @@ -8,4 +8,8 @@ dep_tristate ' ISA Plug and Play support' CONFIG_ISAPNP $CONFIG_PNP +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + dep_bool ' PNPBIOS support (EXPERIMENTAL)' CONFIG_PNPBIOS $CONFIG_PNP +fi + endmenu diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/pnp/Makefile linux.21pre4-ac2/drivers/pnp/Makefile --- linux.21pre4/drivers/pnp/Makefile 2003-01-29 16:26:56.000000000 +0000 +++ linux.21pre4-ac2/drivers/pnp/Makefile 2003-01-06 15:42:50.000000000 +0000 @@ -10,15 +10,22 @@ O_TARGET := pnp.o -export-objs := isapnp.o -list-multi := isa-pnp.o +export-objs := isapnp.o pnpbios_core.o +multi-objs := isa-pnp.o pnpbios.o -proc-$(CONFIG_PROC_FS) = isapnp_proc.o -isa-pnp-objs := isapnp.o quirks.o $(proc-y) +isa-pnp-proc-$(CONFIG_PROC_FS) = isapnp_proc.o +pnpbios-proc-$(CONFIG_PROC_FS) = pnpbios_proc.o + +isa-pnp-objs := isapnp.o quirks.o $(isa-pnp-proc-y) +pnpbios-objs := pnpbios_core.o $(pnpbios-proc-y) obj-$(CONFIG_ISAPNP) += isa-pnp.o +obj-$(CONFIG_PNPBIOS) += pnpbios.o include 
$(TOPDIR)/Rules.make isa-pnp.o: $(isa-pnp-objs) $(LD) $(LD_RFLAG) -r -o $@ $(isa-pnp-objs) + +pnpbios.o: $(pnpbios-objs) + $(LD) $(LD_RFLAG) -r -o $@ $(pnpbios-objs) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/pnp/pnpbios_core.c linux.21pre4-ac2/drivers/pnp/pnpbios_core.c --- linux.21pre4/drivers/pnp/pnpbios_core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/pnp/pnpbios_core.c 2003-01-06 15:42:50.000000000 +0000 @@ -0,0 +1,1352 @@ +/* + * PnP BIOS services + * + * Originally (C) 1998 Christian Schmidt + * Modifications (c) 1998 Tom Lees + * Minor reorganizations by David Hinds + * Modifications (c) 2001,2002 by Thomas Hood + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * References: + * Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corporation + * Plug and Play BIOS Specification, Version 1.0A, May 5, 1994 + * Plug and Play BIOS Clarification Paper, October 6, 1994 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * + * PnP BIOS INTERFACE + * + */ + +/* PnP BIOS signature: "$PnP" */ +#define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24)) + +#pragma pack(1) +union pnp_bios_expansion_header { + struct { + u32 signature; /* "$PnP" */ + u8 version; /* in BCD */ + u8 length; /* length in bytes, currently 21h */ + u16 control; /* system capabilities */ + u8 checksum; /* all bytes must add up to 0 */ + + u32 eventflag; /* phys. address of the event flag */ + u16 rmoffset; /* real mode entry point */ + u16 rmcseg; + u16 pm16offset; /* 16 bit protected mode entry */ + u32 pm16cseg; + u32 deviceID; /* EISA encoded system ID or 0 */ + u16 rmdseg; /* real mode data segment */ + u32 pm16dseg; /* 16 bit pm data segment base */ + } fields; + char chars[0x21]; /* To calculate the checksum */ +}; +#pragma pack() + +static struct { + u16 offset; + u16 segment; +} pnp_bios_callpoint; + +static union pnp_bios_expansion_header * pnp_bios_hdr = NULL; + +/* The PnP BIOS entries in the GDT */ +#define PNP_GDT (0x0060) +#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */ +#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */ +#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */ +#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */ +#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */ + +/* + * These are some opcodes for a "static asmlinkage" + * As this code is *not* executed inside the linux kernel segment, but in a + * alias at offset 0, we need a far return that can not be compiled by + * default (please, prove me wrong! this is *really* ugly!) 
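
The checksum byte in the $PnP expansion header above is defined so that all hdr->fields.length bytes of the structure sum to zero modulo 256 ("all bytes must add up to 0"). A minimal sketch of the validity test this implies; pnp_bios_header_valid is an illustrative name, not the function the driver itself uses when it locates the signature in the BIOS area:

    /* Sketch only: signature and modulo-256 checksum test for the
     * installation-check structure described above. */
    static int pnp_bios_header_valid(const union pnp_bios_expansion_header *hdr)
    {
            u8 sum = 0;
            unsigned int i;

            if (hdr->fields.signature != PNP_SIGNATURE)
                    return 0;
            if (hdr->fields.length < 0x21)      /* current header is 0x21 bytes */
                    return 0;
            for (i = 0; i < hdr->fields.length && i < sizeof(hdr->chars); i++)
                    sum += hdr->chars[i];
            return sum == 0;
    }
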
+ * This is the only way to get the bios to return into the kernel code, + * because the bios code runs in 16 bit protected mode and therefore can only + * return to the caller if the call is within the first 64kB, and the linux + * kernel begins at offset 3GB... + */ + +asmlinkage void pnp_bios_callfunc(void); + +__asm__( + ".text \n" + __ALIGN_STR "\n" + SYMBOL_NAME_STR(pnp_bios_callfunc) ":\n" + " pushl %edx \n" + " pushl %ecx \n" + " pushl %ebx \n" + " pushl %eax \n" + " lcallw " SYMBOL_NAME_STR(pnp_bios_callpoint) "\n" + " addl $16, %esp \n" + " lret \n" + ".previous \n" +); + +#define Q_SET_SEL(selname, address, size) \ +set_base (gdt [(selname) >> 3], __va((u32)(address))); \ +set_limit (gdt [(selname) >> 3], size) + +#define Q2_SET_SEL(selname, address, size) \ +set_base (gdt [(selname) >> 3], (u32)(address)); \ +set_limit (gdt [(selname) >> 3], size) + +/* + * At some point we want to use this stack frame pointer to unwind + * after PnP BIOS oopses. + */ + +u32 pnp_bios_fault_esp; +u32 pnp_bios_fault_eip; +u32 pnp_bios_is_utter_crap = 0; + +static spinlock_t pnp_bios_lock; + +static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3, + u16 arg4, u16 arg5, u16 arg6, u16 arg7, + void *ts1_base, u32 ts1_size, + void *ts2_base, u32 ts2_size) +{ + unsigned long flags; + u16 status; + + /* + * PnP BIOSes are generally not terribly re-entrant. + * Also, don't rely on them to save everything correctly. + */ + if(pnp_bios_is_utter_crap) + return PNP_FUNCTION_NOT_SUPPORTED; + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); + + if (ts1_size) + Q2_SET_SEL(PNP_TS1, ts1_base, ts1_size); + if (ts2_size) + Q2_SET_SEL(PNP_TS2, ts2_base, ts2_size); + + __asm__ __volatile__( + "pushl %%ebp\n\t" + "pushl %%edi\n\t" + "pushl %%esi\n\t" + "pushl %%ds\n\t" + "pushl %%es\n\t" + "pushl %%fs\n\t" + "pushl %%gs\n\t" + "pushfl\n\t" + "movl %%esp, pnp_bios_fault_esp\n\t" + "movl $1f, pnp_bios_fault_eip\n\t" + "lcall %5,%6\n\t" + "1:popfl\n\t" + "popl %%gs\n\t" + "popl %%fs\n\t" + "popl %%es\n\t" + "popl %%ds\n\t" + "popl %%esi\n\t" + "popl %%edi\n\t" + "popl %%ebp\n\t" + : "=a" (status) + : "0" ((func) | (((u32)arg1) << 16)), + "b" ((arg2) | (((u32)arg3) << 16)), + "c" ((arg4) | (((u32)arg5) << 16)), + "d" ((arg6) | (((u32)arg7) << 16)), + "i" (PNP_CS32), + "i" (0) + : "memory" + ); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + + /* If we get here and this is set then the PnP BIOS faulted on us. */ + if(pnp_bios_is_utter_crap) + { + printk(KERN_ERR "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. 
Attempting to continue.\n"); + printk(KERN_ERR "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably.\n"); + printk(KERN_ERR "PnPBIOS: Check with your vendor for an updated BIOS.\n"); + } + + return status; +} + + +/* + * + * UTILITY FUNCTIONS + * + */ + +static void pnpbios_warn_unexpected_status(const char * module, u16 status) +{ + printk(KERN_ERR "PnPBIOS: %s: Unexpected status 0x%x\n", module, status); +} + +void *pnpbios_kmalloc(size_t size, int f) +{ + void *p = kmalloc( size, f ); + if ( p == NULL ) + printk(KERN_ERR "PnPBIOS: kmalloc() failed\n"); + return p; +} + +/* + * Call this only after init time + */ +static inline int pnp_bios_present(void) +{ + return (pnp_bios_hdr != NULL); +} + +/* Forward declaration */ +static void update_devlist( u8 nodenum, struct pnp_bios_node *data ); + + +/* + * + * PnP BIOS ACCESS FUNCTIONS + * + */ + +#define PNP_GET_NUM_SYS_DEV_NODES 0x00 +#define PNP_GET_SYS_DEV_NODE 0x01 +#define PNP_SET_SYS_DEV_NODE 0x02 +#define PNP_GET_EVENT 0x03 +#define PNP_SEND_MESSAGE 0x04 +#define PNP_GET_DOCKING_STATION_INFORMATION 0x05 +#define PNP_SET_STATIC_ALLOCED_RES_INFO 0x09 +#define PNP_GET_STATIC_ALLOCED_RES_INFO 0x0a +#define PNP_GET_APM_ID_TABLE 0x0b +#define PNP_GET_PNP_ISA_CONFIG_STRUC 0x40 +#define PNP_GET_ESCD_INFO 0x41 +#define PNP_READ_ESCD 0x42 +#define PNP_WRITE_ESCD 0x43 + +/* + * Call PnP BIOS with function 0x00, "get number of system device nodes" + */ +static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2, PNP_TS1, PNP_DS, 0, 0, + data, sizeof(struct pnp_dev_node_info), 0, 0); + data->no_nodes &= 0xff; + return status; +} + +int pnp_bios_dev_node_info(struct pnp_dev_node_info *data) +{ + int status = __pnp_bios_dev_node_info( data ); + if ( status ) + pnpbios_warn_unexpected_status( "dev_node_info", status ); + return status; +} + +/* + * Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible + * death if they are asked to access the "current" configuration. + * Therefore, if it's a matter of indifference, it's better to call + * get_dev_node() and set_dev_node() with boot=1 rather than with boot=0. + */ + +/* + * Call PnP BIOS with function 0x01, "get system device node" + * Input: *nodenum = desired node, + * boot = whether to get nonvolatile boot (!=0) + * or volatile current (0) config + * Output: *nodenum=next node or 0xff if no more nodes + */ +static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + if ( !boot & pnpbios_dont_use_current_config ) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 
2 : 1, PNP_DS, 0, + nodenum, sizeof(char), data, 65536); + return status; +} + +int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data) +{ + int status; + status = __pnp_bios_get_dev_node( nodenum, boot, data ); + if ( status ) + pnpbios_warn_unexpected_status( "get_dev_node", status ); + return status; +} + + +/* + * Call PnP BIOS with function 0x02, "set system device node" + * Input: *nodenum = desired node, + * boot = whether to set nonvolatile boot (!=0) + * or volatile current (0) config + */ +static int __pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + if ( !boot & pnpbios_dont_use_current_config ) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1, boot ? 2 : 1, PNP_DS, 0, 0, + data, 65536, 0, 0); + return status; +} + +int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data) +{ + int status; + status = __pnp_bios_set_dev_node( nodenum, boot, data ); + if ( status ) { + pnpbios_warn_unexpected_status( "set_dev_node", status ); + return status; + } + if ( !boot ) { /* Update devlist */ + u8 thisnodenum = nodenum; + status = pnp_bios_get_dev_node( &nodenum, boot, data ); + if ( status ) + return status; + update_devlist( thisnodenum, data ); + } + return status; +} + +#if needed +/* + * Call PnP BIOS with function 0x03, "get event" + */ +static int pnp_bios_get_event(u16 *event) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0, + event, sizeof(u16), 0, 0); + return status; +} +#endif + +#if needed +/* + * Call PnP BIOS with function 0x04, "send message" + */ +static int pnp_bios_send_message(u16 message) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SEND_MESSAGE, message, PNP_DS, 0, 0, 0, 0, 0, 0, 0, 0, 0); + return status; +} +#endif + +#ifdef CONFIG_HOTPLUG +/* + * Call PnP BIOS with function 0x05, "get docking station information" + */ +static int pnp_bios_dock_station_info(struct pnp_docking_station_info *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + data, sizeof(struct pnp_docking_station_info), 0, 0); + return status; +} +#endif + +#if needed +/* + * Call PnP BIOS with function 0x09, "set statically allocated resource + * information" + */ +static int pnp_bios_set_stat_res(char *info) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_SET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + info, *((u16 *) info), 0, 0); + return status; +} +#endif + +/* + * Call PnP BIOS with function 0x0a, "get statically allocated resource + * information" + */ +static int __pnp_bios_get_stat_res(char *info) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + info, 65536, 0, 0); + return status; +} + +int pnp_bios_get_stat_res(char *info) +{ + int status; + status = __pnp_bios_get_stat_res( info ); + if ( status ) + pnpbios_warn_unexpected_status( "get_stat_res", status ); + return status; +} + +#if needed +/* + * Call PnP BIOS with function 0x0b, "get APM id table" + */ +static int pnp_bios_apm_id_table(char 
*table, u16 *size) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_APM_ID_TABLE, 0, PNP_TS2, 0, PNP_TS1, PNP_DS, 0, 0, + table, *size, size, sizeof(u16)); + return status; +} +#endif + +/* + * Call PnP BIOS with function 0x40, "get isa pnp configuration structure" + */ +static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data) +{ + u16 status; + if (!pnp_bios_present()) + return PNP_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0, + data, sizeof(struct pnp_isa_config_struc), 0, 0); + return status; +} + +int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data) +{ + int status; + status = __pnp_bios_isapnp_config( data ); + if ( status ) + pnpbios_warn_unexpected_status( "isapnp_config", status ); + return status; +} + +/* + * Call PnP BIOS with function 0x41, "get ESCD info" + */ +static int __pnp_bios_escd_info(struct escd_info_struc *data) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS, + data, sizeof(struct escd_info_struc), 0, 0); + return status; +} + +int pnp_bios_escd_info(struct escd_info_struc *data) +{ + int status; + status = __pnp_bios_escd_info( data ); + if ( status ) + pnpbios_warn_unexpected_status( "escd_info", status ); + return status; +} + +/* + * Call PnP BIOS function 0x42, "read ESCD" + * nvram_base is determined by calling escd_info + */ +static int __pnp_bios_read_escd(char *data, u32 nvram_base) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0, + data, 65536, (void *)nvram_base, 65536); + return status; +} + +int pnp_bios_read_escd(char *data, u32 nvram_base) +{ + int status; + status = __pnp_bios_read_escd( data, nvram_base ); + if ( status ) + pnpbios_warn_unexpected_status( "read_escd", status ); + return status; +} + +#if needed +/* + * Call PnP BIOS function 0x43, "write ESCD" + */ +static int pnp_bios_write_escd(char *data, u32 nvram_base) +{ + u16 status; + if (!pnp_bios_present()) + return ESCD_FUNCTION_NOT_SUPPORTED; + status = call_pnp_bios(PNP_WRITE_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0, + data, 65536, nvram_base, 65536); + return status; +} +#endif + + +/* + * + * DOCKING FUNCTIONS + * + */ + +#ifdef CONFIG_HOTPLUG + +static int unloading = 0; +static struct completion unload_sem; + +/* + * (Much of this belongs in a shared routine somewhere) + */ + +static int pnp_dock_event(int dock, struct pnp_docking_station_info *info) +{ + char *argv [3], **envp, *buf, *scratch; + int i = 0, value; + + if (!hotplug_path [0]) + return -ENOENT; + if (!current->fs->root) { + return -EAGAIN; + } + if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) { + return -ENOMEM; + } + if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) { + kfree (envp); + return -ENOMEM; + } + + /* only one standardized param to hotplug command: type */ + argv [0] = hotplug_path; + argv [1] = "dock"; + argv [2] = 0; + + /* minimal command environment */ + envp [i++] = "HOME=/"; + envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + +#ifdef DEBUG + /* hint that policy agent should enter no-stdout debug mode */ + envp [i++] = "DEBUG=kernel"; +#endif + /* extensible set of named bus-specific parameters, + * supporting multiple driver selection algorithms. 
+ */ + scratch = buf; + + /* action: add, remove */ + envp [i++] = scratch; + scratch += sprintf (scratch, "ACTION=%s", dock?"add":"remove") + 1; + + /* Report the ident for the dock */ + envp [i++] = scratch; + scratch += sprintf (scratch, "DOCK=%x/%x/%x", + info->location_id, info->serial, info->capabilities); + envp[i] = 0; + + value = call_usermodehelper (argv [0], argv, envp); + kfree (buf); + kfree (envp); + return 0; +} + +/* + * Poll the PnP docking at regular intervals + */ +static int pnp_dock_thread(void * unused) +{ + static struct pnp_docking_station_info now; + int docked = -1, d = 0; + daemonize(); + reparent_to_init(); + strcpy(current->comm, "kpnpbiosd"); + while(!unloading && !signal_pending(current)) + { + int status; + + /* + * Poll every 2 seconds + */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ*2); + if(signal_pending(current)) + break; + + status = pnp_bios_dock_station_info(&now); + + switch(status) + { + /* + * No dock to manage + */ + case PNP_FUNCTION_NOT_SUPPORTED: + complete_and_exit(&unload_sem, 0); + case PNP_SYSTEM_NOT_DOCKED: + d = 0; + break; + case PNP_SUCCESS: + d = 1; + break; + default: + pnpbios_warn_unexpected_status( "pnp_dock_thread", status ); + continue; + } + if(d != docked) + { + if(pnp_dock_event(d, &now)==0) + { + docked = d; +#if 0 + printk(KERN_INFO "PnPBIOS: Docking station %stached\n", docked?"at":"de"); +#endif + } + } + } + complete_and_exit(&unload_sem, 0); +} + +#endif /* CONFIG_HOTPLUG */ + + +/* + * + * NODE DATA PARSING FUNCTIONS + * + */ + +static void add_irqresource(struct pci_dev *dev, int irq) +{ + int i = 0; + while (!(dev->irq_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_IRQ) i++; + if (i < DEVICE_COUNT_IRQ) { + dev->irq_resource[i].start = (unsigned long) irq; + dev->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag + } +} + +static void add_dmaresource(struct pci_dev *dev, int dma) +{ + int i = 0; + while (!(dev->dma_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_DMA) i++; + if (i < DEVICE_COUNT_DMA) { + dev->dma_resource[i].start = (unsigned long) dma; + dev->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag + } +} + +static void add_ioresource(struct pci_dev *dev, int io, int len) +{ + int i = 0; + while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++; + if (i < DEVICE_COUNT_RESOURCE) { + dev->resource[i].start = (unsigned long) io; + dev->resource[i].end = (unsigned long)(io + len - 1); + dev->resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag + } +} + +static void add_memresource(struct pci_dev *dev, int mem, int len) +{ + int i = 0; + while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++; + if (i < DEVICE_COUNT_RESOURCE) { + dev->resource[i].start = (unsigned long) mem; + dev->resource[i].end = (unsigned long)(mem + len - 1); + dev->resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag + } +} + +static void node_resource_data_to_dev(struct pnp_bios_node *node, struct pci_dev *dev) +{ + unsigned char *p = node->data, *lastp=NULL; + int i; + + /* + * First, set resource info to default values + */ + for (i=0;iresource[i].start = 0; // "disabled" + dev->resource[i].flags = IORESOURCE_UNSET; + } + for (i=0;iirq_resource[i].start = (unsigned long)-1; // "disabled" + dev->irq_resource[i].flags = IORESOURCE_UNSET; + } + for (i=0;idma_resource[i].start = (unsigned long)-1; // "disabled" + dev->dma_resource[i].flags = IORESOURCE_UNSET; + } + + /* + * Fill in dev 
resource info + */ + while ( (char *)p < ((char *)node->data + node->size )) { + if(p==lastp) break; + + if( p[0] & 0x80 ) {// large item + switch (p[0] & 0x7f) { + case 0x01: // memory + { + int io = *(short *) &p[4]; + int len = *(short *) &p[10]; + add_memresource(dev, io, len); + break; + } + case 0x02: // device name + { + int len = *(short *) &p[1]; + memcpy(dev->name, p + 3, len >= 80 ? 79 : len); + break; + } + case 0x05: // 32-bit memory + { + int io = *(int *) &p[4]; + int len = *(int *) &p[16]; + add_memresource(dev, io, len); + break; + } + case 0x06: // fixed location 32-bit memory + { + int io = *(int *) &p[4]; + int len = *(int *) &p[8]; + add_memresource(dev, io, len); + break; + } + } /* switch */ + lastp = p+3; + p = p + p[1] + p[2]*256 + 3; + continue; + } + if ((p[0]>>3) == 0x0f) // end tag + break; + switch (p[0]>>3) { + case 0x04: // irq + { + int i, mask, irq = -1; + mask= p[1] + p[2]*256; + for (i=0;i<16;i++, mask=mask>>1) + if(mask & 0x01) irq=i; + add_irqresource(dev, irq); + break; + } + case 0x05: // dma + { + int i, mask, dma = -1; + mask = p[1]; + for (i=0;i<8;i++, mask = mask>>1) + if(mask & 0x01) dma=i; + add_dmaresource(dev, dma); + break; + } + case 0x08: // io + { + int io= p[2] + p[3] *256; + int len = p[7]; + add_ioresource(dev, io, len); + break; + } + case 0x09: // fixed location io + { + int io = p[1] + p[2] * 256; + int len = p[3]; + add_ioresource(dev, io, len); + break; + } + } /* switch */ + lastp=p+1; + p = p + (p[0] & 0x07) + 1; + + } /* while */ + + return; +} + + +/* + * + * DEVICE LIST MANAGEMENT FUNCTIONS + * + * + * Some of these are exported to give public access + * + * Question: Why maintain a device list when the PnP BIOS can + * list devices for us? Answer: Some PnP BIOSes can't report + * the current configuration, only the boot configuration. + * The boot configuration can be changed, so we need to keep + * a record of what the configuration was when we booted; + * presumably it continues to describe the current config. + * For those BIOSes that can change the current config, we + * keep the information in the devlist up to date. + * + * Note that it is currently assumed that the list does not + * grow or shrink in size after init time, and slot_name + * never changes. The list is protected by a spinlock. + */ + +static LIST_HEAD(pnpbios_devices); + +static spinlock_t pnpbios_devices_lock; + +static int inline insert_device(struct pci_dev *dev) +{ + + /* + * FIXME: Check for re-add of existing node; + * return -1 if node already present + */ + + /* We don't lock because we only do this at init time */ + list_add_tail(&dev->global_list, &pnpbios_devices); + + return 0; +} + +#define HEX(id,a) hex[((id)>>a) & 15] +#define CHAR(id,a) (0x40 + (((id)>>a) & 31)) +// +static void inline pnpid32_to_pnpid(u32 id, char *str) +{ + const char *hex = "0123456789abcdef"; + + id = be32_to_cpu(id); + str[0] = CHAR(id, 26); + str[1] = CHAR(id, 21); + str[2] = CHAR(id,16); + str[3] = HEX(id, 12); + str[4] = HEX(id, 8); + str[5] = HEX(id, 4); + str[6] = HEX(id, 0); + str[7] = '\0'; + + return; +} +// +#undef CHAR +#undef HEX + +/* + * Build a linked list of pci_devs in order of ascending node number + * Called only at init time. 
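+ * Each device recorded here gets its slot_name from pnpid32_to_pnpid()
+ * above.  Worked example (for illustration): an eisa_id whose
+ * be32_to_cpu() value is 0x41d00c02 unpacks into the three 5-bit
+ * compressed-ASCII fields 16, 14, 16 ('P', 'N', 'P') and the four hex
+ * nibbles 0, c, 0, 2, so the resulting slot_name is "PNP0c02".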
+ */ +static void __init build_devlist(void) +{ + u8 nodenum; + unsigned int nodes_got = 0; + unsigned int devs = 0; + struct pnp_bios_node *node; + struct pnp_dev_node_info node_info; + struct pci_dev *dev; + + if (!pnp_bios_present()) + return; + + if (pnp_bios_dev_node_info(&node_info) != 0) + return; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) + return; + + for(nodenum=0; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + /* We build the list from the "boot" config because + * asking for the "current" config causes some + * BIOSes to crash. + */ + if (pnp_bios_get_dev_node(&nodenum, (char )1 , node)) + break; + nodes_got++; + dev = pnpbios_kmalloc(sizeof (struct pci_dev), GFP_KERNEL); + if (!dev) + break; + memset(dev,0,sizeof(struct pci_dev)); + dev->devfn = thisnodenum; + memcpy(dev->name,"PNPBIOS",8); + pnpid32_to_pnpid(node->eisa_id,dev->slot_name); + node_resource_data_to_dev(node,dev); + if(insert_device(dev)<0) + kfree(dev); + else + devs++; + if (nodenum <= thisnodenum) { + printk(KERN_ERR "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", (unsigned int)nodenum, (unsigned int)thisnodenum); + break; + } + } + kfree(node); + + printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n", + nodes_got, nodes_got != 1 ? "s" : "", devs); +} + +static struct pci_dev *find_device_by_nodenum( u8 nodenum ) +{ + struct pci_dev *dev; + + pnpbios_for_each_dev(dev) { + if(dev->devfn == nodenum) + return dev; + } + + return NULL; +} + +static void update_devlist( u8 nodenum, struct pnp_bios_node *data ) +{ + unsigned long flags; + struct pci_dev *dev; + + spin_lock_irqsave(&pnpbios_devices_lock, flags); + dev = find_device_by_nodenum( nodenum ); + if ( dev ) { + node_resource_data_to_dev(data,dev); + } + spin_unlock_irqrestore(&pnpbios_devices_lock, flags); + + return; +} + + +/* + * + * DRIVER REGISTRATION FUNCTIONS + * + * + * Exported to give public access + * + */ + +static LIST_HEAD(pnpbios_drivers); + +static const struct pnpbios_device_id * +match_device(const struct pnpbios_device_id *ids, const struct pci_dev *dev) +{ + while (*ids->id) + { + if(memcmp(ids->id, dev->slot_name, 7)==0) + return ids; + ids++; + } + return NULL; +} + +static int announce_device(struct pnpbios_driver *drv, struct pci_dev *dev) +{ + const struct pnpbios_device_id *id; + struct pci_dev tmpdev; + int ret; + + if (drv->id_table) { + id = match_device(drv->id_table, dev); + if (!id) + return 0; + } else + id = NULL; + + memcpy( &tmpdev, dev, sizeof(struct pci_dev)); + tmpdev.global_list.prev = NULL; + tmpdev.global_list.next = NULL; + + dev_probe_lock(); + /* Obviously, probe() should not call any pnpbios functions */ + ret = drv->probe(&tmpdev, id); + dev_probe_unlock(); + if (ret < 1) + return 0; + + dev->driver = (void *)drv; + + return 1; +} + +/** + * pnpbios_register_driver - register a new pci driver + * @drv: the driver structure to register + * + * Adds the driver structure to the list of registered drivers + * + * For each device in the pnpbios device list that matches one of + * the ids in drv->id_table, calls the driver's "probe" function with + * arguments (1) a pointer to a *temporary* struct pci_dev containing + * resource info for the device, and (2) a pointer to the id string + * of the device. Expects the probe function to return 1 if the + * driver claims the device (otherwise 0) in which case, marks the + * device as having this driver. 
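+ *
+ * A minimal registration sketch (illustrative only: the "foo" names,
+ * the "PNP0501" id and the trivial probe body are hypothetical; the
+ * structures and the call are the ones this interface defines):
+ *
+ *	static const struct pnpbios_device_id foo_ids[] = {
+ *		{ "PNP0501" },
+ *		{ "" }
+ *	};
+ *
+ *	static int foo_probe(struct pci_dev *dev,
+ *			     const struct pnpbios_device_id *id)
+ *	{
+ *		return 1;
+ *	}
+ *
+ *	static struct pnpbios_driver foo_driver = {
+ *		id_table:	foo_ids,
+ *		probe:		foo_probe,
+ *	};
+ *
+ *	pnpbios_register_driver(&foo_driver);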
+ * + * Returns the number of pci devices which were claimed by the driver + * during registration. The driver remains registered even if the + * return value is zero. + */ +int pnpbios_register_driver(struct pnpbios_driver *drv) +{ + struct pci_dev *dev; + unsigned long flags; + int count = 0; + + list_add_tail(&drv->node, &pnpbios_drivers); + spin_lock_irqsave(&pnpbios_devices_lock, flags); + pnpbios_for_each_dev(dev) { + if (!pnpbios_dev_driver(dev)) + count += announce_device(drv, dev); + } + spin_unlock_irqrestore(&pnpbios_devices_lock, flags); + return count; +} + +EXPORT_SYMBOL(pnpbios_register_driver); + +/** + * pnpbios_unregister_driver - unregister a pci driver + * @drv: the driver structure to unregister + * + * Deletes the driver structure from the list of registered PnPBIOS + * drivers, gives it a chance to clean up by calling its "remove" + * function for each device it was responsible for, and marks those + * devices as driverless. + */ +void pnpbios_unregister_driver(struct pnpbios_driver *drv) +{ + unsigned long flags; + struct pci_dev *dev; + + list_del(&drv->node); + spin_lock_irqsave(&pnpbios_devices_lock, flags); + pnpbios_for_each_dev(dev) { + if (dev->driver == (void *)drv) { + if (drv->remove) + drv->remove(dev); + dev->driver = NULL; + } + } + spin_unlock_irqrestore(&pnpbios_devices_lock, flags); +} + +EXPORT_SYMBOL(pnpbios_unregister_driver); + + +/* + * + * RESOURCE RESERVATION FUNCTIONS + * + * + * Used only at init time + * + */ + +static void __init reserve_ioport_range(char *pnpid, int start, int end) +{ + struct resource *res; + char *regionid; + +#if 0 + /* + * TEMPORARY hack to work around the fact that the + * floppy driver inappropriately reserves ioports 0x3f0 and 0x3f1 + * Remove this once the floppy driver is fixed. + */ + if ( + (0x3f0 >= start && 0x3f0 <= end) + || (0x3f1 >= start && 0x3f1 <= end) + ) { + printk(KERN_INFO + "PnPBIOS: %s: ioport range 0x%x-0x%x NOT reserved\n", + pnpid, start, end + ); + return; + } +#endif + + regionid = pnpbios_kmalloc(16, GFP_KERNEL); + if ( regionid == NULL ) + return; + snprintf(regionid, 16, "PnPBIOS %s", pnpid); + res = request_region(start,end-start+1,regionid); + if ( res == NULL ) + kfree( regionid ); + else + res->flags &= ~IORESOURCE_BUSY; + /* + * Failures at this point are usually harmless. pci quirks for + * example do reserve stuff they know about too, so we may well + * have double reservations. + */ + printk(KERN_INFO + "PnPBIOS: %s: ioport range 0x%x-0x%x %s reserved\n", + pnpid, start, end, + NULL != res ? "has been" : "could not be" + ); + + return; +} + +static void __init reserve_resources_of_dev( struct pci_dev *dev ) +{ + int i; + + for (i=0;iresource[i].flags & IORESOURCE_UNSET ) + /* end of resources */ + break; + if (dev->resource[i].flags & IORESOURCE_IO) { + /* ioport */ + if ( dev->resource[i].start == 0 ) + /* disabled */ + /* Do nothing */ + continue; + if ( dev->resource[i].start < 0x100 ) + /* + * Below 0x100 is only standard PC hardware + * (pics, kbd, timer, dma, ...) + * We should not get resource conflicts there, + * and the kernel reserves these anyway + * (see arch/i386/kernel/setup.c). 
+ * So, do nothing + */ + continue; + if ( dev->resource[i].end < dev->resource[i].start ) + /* invalid endpoint */ + /* Do nothing */ + continue; + reserve_ioport_range( + dev->slot_name, + dev->resource[i].start, + dev->resource[i].end + ); + } else if (dev->resource[i].flags & IORESOURCE_MEM) { + /* iomem */ + /* For now do nothing */ + continue; + } else { + /* Neither ioport nor iomem */ + /* Do nothing */ + continue; + } + } + + return; +} + +static void __init reserve_resources( void ) +{ + struct pci_dev *dev; + + pnpbios_for_each_dev(dev) { + if ( + 0 != strcmp(dev->slot_name,"PNP0c01") && /* memory controller */ + 0 != strcmp(dev->slot_name,"PNP0c02") /* system peripheral: other */ + ) { + continue; + } + reserve_resources_of_dev(dev); + } + + return; +} + + +/* + * + * INIT AND EXIT + * + */ + +extern int is_sony_vaio_laptop; + +static int pnpbios_disabled; /* = 0 */ +static int dont_reserve_resources; /* = 0 */ +int pnpbios_dont_use_current_config; /* = 0 */ + +#ifndef MODULE +static int __init pnpbios_setup(char *str) +{ + int invert; + + while ((str != NULL) && (*str != '\0')) { + if (strncmp(str, "off", 3) == 0) + pnpbios_disabled=1; + if (strncmp(str, "on", 2) == 0) + pnpbios_disabled=0; + invert = (strncmp(str, "no-", 3) == 0); + if (invert) + str += 3; + if (strncmp(str, "curr", 4) == 0) + pnpbios_dont_use_current_config = invert; + if (strncmp(str, "res", 3) == 0) + dont_reserve_resources = invert; + str = strchr(str, ','); + if (str != NULL) + str += strspn(str, ", \t"); + } + + return 1; +} + +__setup("pnpbios=", pnpbios_setup); +#endif + +int __init pnpbios_init(void) +{ + union pnp_bios_expansion_header *check; + u8 sum; + int i, length, r; + + spin_lock_init(&pnp_bios_lock); + spin_lock_init(&pnpbios_devices_lock); + + if(pnpbios_disabled) { + printk(KERN_INFO "PnPBIOS: Disabled\n"); + return -ENODEV; + } + + if ( is_sony_vaio_laptop ) + pnpbios_dont_use_current_config = 1; + + /* + * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS + * structure and, if one is found, sets up the selectors and + * entry points + */ + for (check = (union pnp_bios_expansion_header *) __va(0xf0000); + check < (union pnp_bios_expansion_header *) __va(0xffff0); + ((void *) (check)) += 16) { + if (check->fields.signature != PNP_SIGNATURE) + continue; + length = check->fields.length; + if (!length) + continue; + for (sum = 0, i = 0; i < length; i++) + sum += check->chars[i]; + if (sum) + continue; + if (check->fields.version < 0x10) { + printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported\n", + check->fields.version >> 4, + check->fields.version & 15); + continue; + } + printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n", check); + printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n", + check->fields.version >> 4, check->fields.version & 15, + check->fields.pm16cseg, check->fields.pm16offset, + check->fields.pm16dseg); + Q2_SET_SEL(PNP_CS32, &pnp_bios_callfunc, 64 * 1024); + Q_SET_SEL(PNP_CS16, check->fields.pm16cseg, 64 * 1024); + Q_SET_SEL(PNP_DS, check->fields.pm16dseg, 64 * 1024); + pnp_bios_callpoint.offset = check->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + pnp_bios_hdr = check; + break; + } + if (!pnp_bios_present()) + return -ENODEV; + build_devlist(); + if ( ! 
dont_reserve_resources ) + reserve_resources(); +#ifdef CONFIG_PROC_FS + r = pnpbios_proc_init(); + if (r) + return r; +#endif + return 0; +} + +static int pnpbios_thread_init(void) +{ +#ifdef CONFIG_HOTPLUG + init_completion(&unload_sem); + if(kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL)>0) + unloading = 0; +#endif + return 0; +} + +#ifndef MODULE + +/* init/main.c calls pnpbios_init early */ + +/* Start the kernel thread later: */ +module_init(pnpbios_thread_init); + +#else + +/* + * N.B.: Building pnpbios as a module hasn't been fully implemented + */ + +MODULE_LICENSE("GPL"); + +static int pnpbios_init_all(void) +{ + int r; + r = pnpbios_init(); + if (r) + return r; + r = pnpbios_thread_init(); + if (r) + return r; + return 0; +} + +static void __exit pnpbios_exit(void) +{ +#ifdef CONFIG_HOTPLUG + unloading = 1; + wait_for_completion(&unload_sem); +#endif + pnpbios_proc_exit(); + /* We ought to free resources here */ + return; +} + +module_init(pnpbios_init_all); +module_exit(pnpbios_exit); + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/pnp/pnpbios_proc.c linux.21pre4-ac2/drivers/pnp/pnpbios_proc.c --- linux.21pre4/drivers/pnp/pnpbios_proc.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/pnp/pnpbios_proc.c 2003-01-06 15:42:50.000000000 +0000 @@ -0,0 +1,278 @@ +/* + * /proc/bus/pnp interface for Plug and Play devices + * + * Written by David Hinds, dahinds@users.sourceforge.net + * Modified by Thomas Hood, jdthood@mail.com + * + * The .../devices and .../ and .../boot/ files are + * utilized by the lspnp and setpnp utilities, supplied with the + * pcmcia-cs package. + * http://pcmcia-cs.sourceforge.net + * + * The .../escd file is utilized by the lsescd utility written by + * Gunther Mayer. + * http://home.t-online.de/home/gunther.mayer/lsescd + * + * The .../legacy_device_resources file is not used yet. + * + * The other files are human-readable. 
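+ *
+ * Each line of the .../devices file is written by proc_read_devices()
+ * below in the format "%02x\t%08x\t%02x:%02x:%02x\t%04x", i.e.
+ *
+ *	<node handle>  <eisa_id>  <type_code[0]:[1]:[2]>  <flags>
+ *
+ * one line per device node.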
+ */ + +//#include +#define __NO_VERSION__ +//#include + +#include +#include +#include +#include +#include +#include + +static struct proc_dir_entry *proc_pnp = NULL; +static struct proc_dir_entry *proc_pnp_boot = NULL; +static struct pnp_dev_node_info node_info; + +static int proc_read_pnpconfig(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_isa_config_struc pnps; + + if (pnp_bios_isapnp_config(&pnps)) + return -EIO; + return snprintf(buf, count, + "structure_revision %d\n" + "number_of_CSNs %d\n" + "ISA_read_data_port 0x%x\n", + pnps.revision, + pnps.no_csns, + pnps.isa_rd_data_port + ); +} + +static int proc_read_escdinfo(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct escd_info_struc escd; + + if (pnp_bios_escd_info(&escd)) + return -EIO; + return snprintf(buf, count, + "min_ESCD_write_size %d\n" + "ESCD_size %d\n" + "NVRAM_base 0x%x\n", + escd.min_escd_write_size, + escd.escd_size, + escd.nv_storage_base + ); +} + +#define MAX_SANE_ESCD_SIZE (32*1024) +static int proc_read_escd(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct escd_info_struc escd; + char *tmpbuf; + int escd_size, escd_left_to_read, n; + + if (pnp_bios_escd_info(&escd)) + return -EIO; + + /* sanity check */ + if (escd.escd_size > MAX_SANE_ESCD_SIZE) { + printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n"); + return -EFBIG; + } + + tmpbuf = pnpbios_kmalloc(escd.escd_size, GFP_KERNEL); + if (!tmpbuf) return -ENOMEM; + + if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) + return -EIO; + + escd_size = (unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1])*256; + + /* sanity check */ + if (escd_size > MAX_SANE_ESCD_SIZE) { + printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS read_escd call is too great\n"); + return -EFBIG; + } + + escd_left_to_read = escd_size - pos; + if (escd_left_to_read < 0) escd_left_to_read = 0; + if (escd_left_to_read == 0) *eof = 1; + n = min(count,escd_left_to_read); + memcpy(buf, tmpbuf + pos, n); + kfree(tmpbuf); + *start = buf; + return n; +} + +static int proc_read_legacyres(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + /* Assume that the following won't overflow the buffer */ + if (pnp_bios_get_stat_res(buf)) + return -EIO; + + return count; // FIXME: Return actual length +} + +static int proc_read_devices(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_bios_node *node; + u8 nodenum; + char *p = buf; + + if (pos >= 0xff) + return 0; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + + for (nodenum=pos; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + /* 26 = the number of characters per line sprintf'ed */ + if ((p - buf + 26) > count) + break; + if (pnp_bios_get_dev_node(&nodenum, 1, node)) + break; + p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n", + node->handle, node->eisa_id, + node->type_code[0], node->type_code[1], + node->type_code[2], node->flags); + if (nodenum <= thisnodenum) { + printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. 
Aborting.\n", "PnPBIOS: proc_read_devices:", (unsigned int)nodenum, (unsigned int)thisnodenum); + *eof = 1; + break; + } + } + kfree(node); + if (nodenum == 0xff) + *eof = 1; + *start = (char *)((off_t)nodenum - pos); + return p - buf; +} + +static int proc_read_node(char *buf, char **start, off_t pos, + int count, int *eof, void *data) +{ + struct pnp_bios_node *node; + int boot = (long)data >> 8; + u8 nodenum = (long)data; + int len; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + if (pnp_bios_get_dev_node(&nodenum, boot, node)) + return -EIO; + len = node->size - sizeof(struct pnp_bios_node); + memcpy(buf, node->data, len); + kfree(node); + return len; +} + +static int proc_write_node(struct file *file, const char *buf, + unsigned long count, void *data) +{ + struct pnp_bios_node *node; + int boot = (long)data >> 8; + u8 nodenum = (long)data; + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) return -ENOMEM; + if ( pnp_bios_get_dev_node(&nodenum, boot, node) ) + return -EIO; + if (count != node->size - sizeof(struct pnp_bios_node)) + return -EINVAL; + memcpy(node->data, buf, count); + if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) + return -EINVAL; + kfree(node); + return count; +} + +/* + * When this is called, pnpbios functions are assumed to + * work and the pnpbios_dont_use_current_config flag + * should already have been set to the appropriate value + */ +int __init pnpbios_proc_init( void ) +{ + struct pnp_bios_node *node; + struct proc_dir_entry *ent; + char name[3]; + u8 nodenum; + + if (pnp_bios_dev_node_info(&node_info)) + return -EIO; + + proc_pnp = proc_mkdir("pnp", proc_bus); + if (!proc_pnp) + return -EIO; + proc_pnp_boot = proc_mkdir("boot", proc_pnp); + if (!proc_pnp_boot) + return -EIO; + create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL); + create_proc_read_entry("configuration_info", 0, proc_pnp, proc_read_pnpconfig, NULL); + create_proc_read_entry("escd_info", S_IRUSR, proc_pnp, proc_read_escdinfo, NULL); + create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL); + create_proc_read_entry("legacy_device_resources", 0, proc_pnp, proc_read_legacyres, NULL); + + node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); + if (!node) + return -ENOMEM; + + for (nodenum=0; nodenum<0xff; ) { + u8 thisnodenum = nodenum; + if (pnp_bios_get_dev_node(&nodenum, 1, node) != 0) + break; + sprintf(name, "%02x", node->handle); + if ( !pnpbios_dont_use_current_config ) { + ent = create_proc_entry(name, 0, proc_pnp); + if (ent) { + ent->read_proc = proc_read_node; + ent->write_proc = proc_write_node; + ent->data = (void *)(long)(node->handle); + } + } + ent = create_proc_entry(name, 0, proc_pnp_boot); + if (ent) { + ent->read_proc = proc_read_node; + ent->write_proc = proc_write_node; + ent->data = (void *)(long)(node->handle+0x100); + } + if (nodenum <= thisnodenum) { + printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. 
Aborting.\n", "PnPBIOS: proc_init:", (unsigned int)nodenum, (unsigned int)thisnodenum); + break; + } + } + kfree(node); + + return 0; +} + +void __exit pnpbios_proc_exit(void) +{ + int i; + char name[3]; + + if (!proc_pnp) return; + + for (i=0; i<0xff; i++) { + sprintf(name, "%02x", i); + if ( !pnpbios_dont_use_current_config ) + remove_proc_entry(name, proc_pnp); + remove_proc_entry(name, proc_pnp_boot); + } + remove_proc_entry("legacy_device_resources", proc_pnp); + remove_proc_entry("escd", proc_pnp); + remove_proc_entry("escd_info", proc_pnp); + remove_proc_entry("configuration_info", proc_pnp); + remove_proc_entry("devices", proc_pnp); + remove_proc_entry("boot", proc_pnp); + remove_proc_entry("pnp", proc_bus); + + return; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/dpt_i2o.c linux.21pre4-ac2/drivers/scsi/dpt_i2o.c --- linux.21pre4/drivers/scsi/dpt_i2o.c 2003-01-29 16:26:50.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/dpt_i2o.c 2003-01-08 15:26:13.000000000 +0000 @@ -1498,7 +1498,7 @@ pDev->next_lun; pDev = pDev->next_lun){ } pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL); - if(pDev == NULL) { + if(pDev->next_lun == NULL) { return -ENOMEM; } memset(pDev->next_lun,0,sizeof(struct adpt_device)); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/eata_generic.h linux.21pre4-ac2/drivers/scsi/eata_generic.h --- linux.21pre4/drivers/scsi/eata_generic.h 2003-01-29 16:26:49.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/eata_generic.h 2003-01-31 11:45:21.000000000 +0000 @@ -97,7 +97,7 @@ #define DELAY(x) { ulong flags, i; \ save_flags(flags); sti(); \ i = jiffies + (x * HZ); \ - while (jiffies < i); \ + while (time_before(jiffies, i)) cpu_relax(); \ restore_flags(flags); } /*********************************************** diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/scsi.c linux.21pre4-ac2/drivers/scsi/scsi.c --- linux.21pre4/drivers/scsi/scsi.c 2003-01-29 16:26:49.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/scsi.c 2003-01-06 15:38:22.000000000 +0000 @@ -352,8 +352,9 @@ int interruptable) { struct Scsi_Host *host; - Scsi_Cmnd *SCpnt = NULL; + Scsi_Cmnd *SCpnt; Scsi_Device *SDpnt; + struct list_head *lp; unsigned long flags; if (!device) @@ -364,7 +365,6 @@ spin_lock_irqsave(&device_request_lock, flags); while (1 == 1) { - SCpnt = NULL; if (!device->device_blocked) { if (device->single_lun) { /* @@ -404,26 +404,21 @@ * If asked to wait, we need to wait, otherwise * return NULL. */ - SCpnt = NULL; goto busy; } } /* - * Now we can check for a free command block for this device. + * Is there a free command block for this device? */ - for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) { - if (SCpnt->request.rq_status == RQ_INACTIVE) - break; - } + if (!list_empty(&device->sdev_free_q)) + goto found; } + /* - * If we couldn't find a free command block, and we have been + * Couldn't find a free command block, and we have been * asked to wait, then do so. */ - if (SCpnt) { - break; - } - busy: +busy: /* * If we have been asked to wait for a free block, then * wait here. 
@@ -475,12 +470,20 @@ return NULL; } } + continue; } else { spin_unlock_irqrestore(&device_request_lock, flags); return NULL; } } +found: + lp = device->sdev_free_q.next; + list_del(lp); + SCpnt = list_entry(lp, Scsi_Cmnd, sc_list); + if (SCpnt->request.rq_status != RQ_INACTIVE) + BUG(); + SCpnt->request.rq_status = RQ_SCSI_BUSY; SCpnt->request.waiting = NULL; /* And no one is waiting for this * to complete */ @@ -526,6 +529,9 @@ SDpnt = SCpnt->device; + /* command is now free - add to list */ + list_add(&SCpnt->sc_list, &SDpnt->sdev_free_q); + SCpnt->request.rq_status = RQ_INACTIVE; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; @@ -1333,14 +1339,10 @@ */ int scsi_retry_command(Scsi_Cmnd * SCpnt) { - memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, - sizeof(SCpnt->data_cmnd)); - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; - SCpnt->underflow = SCpnt->old_underflow; + /* + * Restore the SCSI command state. + */ + scsi_setup_cmd_retry(SCpnt); /* * Zero the sense information from the last time we tried @@ -1448,6 +1450,7 @@ spin_lock_irqsave(&device_request_lock, flags); for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) { SDpnt->device_queue = SCnext = SCpnt->next; + list_del(&SCpnt->sc_list); kfree((char *) SCpnt); } SDpnt->has_cmdblocks = 0; @@ -1484,6 +1487,7 @@ SDpnt->queue_depth = 1; /* live to fight another day */ } SDpnt->device_queue = NULL; + INIT_LIST_HEAD(&SDpnt->sdev_free_q); for (j = 0; j < SDpnt->queue_depth; j++) { SCpnt = (Scsi_Cmnd *) @@ -1513,6 +1517,7 @@ SDpnt->device_queue = SCpnt; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; + list_add(&SCpnt->sc_list, &SDpnt->sdev_free_q); } if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */ printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n", diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/scsi_error.c linux.21pre4-ac2/drivers/scsi/scsi_error.c --- linux.21pre4/drivers/scsi/scsi_error.c 2003-01-29 16:26:49.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/scsi_error.c 2003-01-06 15:38:22.000000000 +0000 @@ -385,16 +385,10 @@ */ STATIC int scsi_eh_retry_command(Scsi_Cmnd * SCpnt) { - memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, - sizeof(SCpnt->data_cmnd)); - SCpnt->request_buffer = SCpnt->buffer; - SCpnt->request_bufflen = SCpnt->bufflen; - SCpnt->use_sg = SCpnt->old_use_sg; - SCpnt->cmd_len = SCpnt->old_cmd_len; - SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; - SCpnt->underflow = SCpnt->old_underflow; - - scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command); + do { + scsi_setup_cmd_retry(SCpnt); + scsi_send_eh_cmnd(SCpnt, SCpnt->timeout_per_command); + } while (SCpnt->eh_state == NEEDS_RETRY); /* * Hey, we are done. Let's look to see what happened. @@ -425,12 +419,6 @@ ASSERT_LOCK(&io_request_lock, 0); - memcpy((void *) SCpnt->cmnd, (void *) generic_sense, - sizeof(generic_sense)); - - if (SCpnt->device->scsi_level <= SCSI_2) - SCpnt->cmnd[1] = SCpnt->lun << 5; - scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma) ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA); @@ -438,24 +426,40 @@ printk("cannot allocate scsi_result in scsi_request_sense.\n"); return FAILED; } - /* - * Zero the sense buffer. 
Some host adapters automatically always request - * sense, so it is not a good idea that SCpnt->request_buffer and - * SCpnt->sense_buffer point to the same address (DB). - * 0 is not a valid sense code. - */ - memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); - memset((void *) scsi_result, 0, 256); saved_result = SCpnt->result; - SCpnt->request_buffer = scsi_result; - SCpnt->request_bufflen = 256; - SCpnt->use_sg = 0; - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->sc_data_direction = SCSI_DATA_READ; - SCpnt->underflow = 0; - scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + do { + memcpy((void *) SCpnt->cmnd, (void *) generic_sense, + sizeof(generic_sense)); + + if (SCpnt->device->scsi_level <= SCSI_2) + SCpnt->cmnd[1] = SCpnt->lun << 5; + + /* + * Zero the sense buffer. Some host adapters automatically + * always request sense, so it is not a good idea that + * SCpnt->request_buffer and SCpnt->sense_buffer point to + * the same address (DB). 0 is not a valid sense code. + */ + memset((void *) SCpnt->sense_buffer, 0, + sizeof(SCpnt->sense_buffer)); + memset((void *) scsi_result, 0, 256); + + SCpnt->request_buffer = scsi_result; + SCpnt->request_bufflen = 256; + SCpnt->use_sg = 0; + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + SCpnt->sc_data_direction = SCSI_DATA_READ; + SCpnt->underflow = 0; + + scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + /* + * If the SCSI device responded with "logical unit + * is in process of becoming ready", we need to + * retry this command. + */ + } while (SCpnt->eh_state == NEEDS_RETRY); /* Last chance to have valid sense data */ if (!scsi_sense_valid(SCpnt)) @@ -497,26 +501,34 @@ static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; - memcpy((void *) SCpnt->cmnd, (void *) tur_command, - sizeof(tur_command)); + do { + memcpy((void *) SCpnt->cmnd, (void *) tur_command, + sizeof(tur_command)); - if (SCpnt->device->scsi_level <= SCSI_2) - SCpnt->cmnd[1] = SCpnt->lun << 5; + if (SCpnt->device->scsi_level <= SCSI_2) + SCpnt->cmnd[1] = SCpnt->lun << 5; - /* - * Zero the sense buffer. The SCSI spec mandates that any - * untransferred sense data should be interpreted as being zero. - */ - memset((void *) SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); + /* + * Zero the sense buffer. The SCSI spec mandates that any + * untransferred sense data should be interpreted as being zero. + */ + memset((void *) SCpnt->sense_buffer, 0, + sizeof(SCpnt->sense_buffer)); - SCpnt->request_buffer = NULL; - SCpnt->request_bufflen = 0; - SCpnt->use_sg = 0; - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->underflow = 0; - SCpnt->sc_data_direction = SCSI_DATA_NONE; + SCpnt->request_buffer = NULL; + SCpnt->request_bufflen = 0; + SCpnt->use_sg = 0; + SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); + SCpnt->underflow = 0; + SCpnt->sc_data_direction = SCSI_DATA_NONE; - scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + scsi_send_eh_cmnd(SCpnt, SENSE_TIMEOUT); + /* + * If the SCSI device responded with "logical unit + * is in process of becoming ready", we need to + * retry this command. + */ + } while (SCpnt->eh_state == NEEDS_RETRY); /* * When we eventually call scsi_finish, we really wish to complete @@ -589,7 +601,6 @@ host = SCpnt->host; - retry: /* * We will use a queued command if possible, otherwise we will emulate the * queuing and calling of completion function ourselves. 
@@ -672,14 +683,13 @@ SCSI_LOG_ERROR_RECOVERY(3, printk("scsi_send_eh_cmnd: scsi_eh_completed_normally %x\n", ret)); switch (ret) { - case SUCCESS: - SCpnt->eh_state = SUCCESS; - break; - case NEEDS_RETRY: - goto retry; - case FAILED: default: - SCpnt->eh_state = FAILED; + ret = FAILED; + /*FALLTHROUGH*/ + case FAILED: + case NEEDS_RETRY: + case SUCCESS: + SCpnt->eh_state = ret; break; } } else { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/scsi.h linux.21pre4-ac2/drivers/scsi/scsi.h --- linux.21pre4/drivers/scsi/scsi.h 2003-01-29 16:26:49.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/scsi.h 2003-02-04 15:54:22.000000000 +0000 @@ -465,6 +465,7 @@ int sectors); extern struct Scsi_Device_Template *scsi_get_request_dev(struct request *); extern int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt); +extern void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt); extern int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int); extern void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors, int block_sectors); @@ -558,6 +559,7 @@ int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize new request */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ + struct list_head sdev_free_q; /* list of free cmds */ /* public: */ unsigned int id, lun, channel; @@ -775,6 +777,8 @@ * received on original command * (auto-sense) */ + struct list_head sc_list; /* Inactive cmd list linkage, guarded + * by device_request_lock. */ unsigned flags; /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/scsi_lib.c linux.21pre4-ac2/drivers/scsi/scsi_lib.c --- linux.21pre4/drivers/scsi/scsi_lib.c 2003-01-29 16:26:50.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/scsi_lib.c 2003-01-06 15:38:22.000000000 +0000 @@ -208,6 +208,30 @@ } /* + * Function: scsi_setup_cmd_retry() + * + * Purpose: Restore the command state for a retry + * + * Arguments: SCpnt - command to be restored + * + * Returns: Nothing + * + * Notes: Immediately prior to retrying a command, we need + * to restore certain fields that we saved above. + */ +void scsi_setup_cmd_retry(Scsi_Cmnd *SCpnt) +{ + memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd, + sizeof(SCpnt->data_cmnd)); + SCpnt->request_buffer = SCpnt->buffer; + SCpnt->request_bufflen = SCpnt->bufflen; + SCpnt->use_sg = SCpnt->old_use_sg; + SCpnt->cmd_len = SCpnt->old_cmd_len; + SCpnt->sc_data_direction = SCpnt->sc_old_data_direction; + SCpnt->underflow = SCpnt->old_underflow; +} + +/* * Function: scsi_queue_next_request() * * Purpose: Handle post-processing of completed commands. 
@@ -720,7 +744,7 @@ printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ", SCpnt->host->host_no, (int) SCpnt->channel, (int) SCpnt->target, (int) SCpnt->lun); - print_command(SCpnt->cmnd); + print_command(SCpnt->data_cmnd); print_sense("sd", SCpnt); SCpnt = scsi_end_request(SCpnt, 0, block_sectors); return; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/scsi_scan.c linux.21pre4-ac2/drivers/scsi/scsi_scan.c --- linux.21pre4/drivers/scsi/scsi_scan.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/scsi_scan.c 2003-01-28 16:17:48.000000000 +0000 @@ -596,6 +596,7 @@ } else { /* assume no peripheral if any other sort of error */ scsi_release_request(SRpnt); + scsi_release_commandblocks(SDpnt); return 0; } } @@ -619,6 +620,7 @@ */ if ((scsi_result[0] >> 5) == 3) { scsi_release_request(SRpnt); + scsi_release_commandblocks(SDpnt); return 0; /* assume no peripheral if any sort of error */ } /* The Toshiba ROM was "gender-changed" here as an inline hack. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/sd.c linux.21pre4-ac2/drivers/scsi/sd.c --- linux.21pre4/drivers/scsi/sd.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/sd.c 2003-01-29 17:21:32.000000000 +0000 @@ -848,10 +848,13 @@ break; } - /* Look for non-removable devices that return NOT_READY. + /* Look for non-removable devices that return NOT_READY, + * and don't require manual intervention. * Issue command to spin up drive for these cases. */ if (the_result && !rscsi_disks[i].device->removable && - SRpnt->sr_sense_buffer[2] == NOT_READY) { + SRpnt->sr_sense_buffer[2] == NOT_READY && + ! ( SRpnt->sr_sense_buffer[12] == 0x04 && /* ASC */ + SRpnt->sr_sense_buffer[13] == 0x03 )) { /* ASCQ */ unsigned long time1; if (!spintime) { printk("%s: Spinning up disk...", nbuff); @@ -1001,7 +1004,7 @@ */ int m; int hard_sector = sector_size; - int sz = rscsi_disks[i].capacity * (hard_sector/256); + unsigned int sz = (rscsi_disks[i].capacity/2) * (hard_sector/256); /* There are 16 minors allocated for each major device */ for (m = i << 4; m < ((i + 1) << 4); m++) { @@ -1009,9 +1012,9 @@ } printk("SCSI device %s: " - "%d %d-byte hdwr sectors (%d MB)\n", + "%u %d-byte hdwr sectors (%d MB)\n", nbuff, rscsi_disks[i].capacity, - hard_sector, (sz/2 - sz/1250 + 974)/1950); + hard_sector, (sz - sz/625 + 974)/1950); } /* Rescale capacity to 512-byte units */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/sd.h linux.21pre4-ac2/drivers/scsi/sd.h --- linux.21pre4/drivers/scsi/sd.h 2003-01-29 16:26:49.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/sd.h 2003-02-04 15:54:22.000000000 +0000 @@ -24,13 +24,13 @@ #endif typedef struct scsi_disk { - unsigned capacity; /* size in blocks */ Scsi_Device *device; - unsigned char ready; /* flag ready for FLOPTICAL */ - unsigned char write_prot; /* flag write_protect for rmvable dev */ - unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ - unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ + unsigned capacity; /* size in blocks */ + unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */ + unsigned char sector_bit_shift; /* power of 2 sectors per FS block */ unsigned has_part_table:1; /* has partition table */ + unsigned ready:1; /* flag ready for FLOPTICAL */ + unsigned write_prot:1; /* flag write_protect for rmvable dev */ } Scsi_Disk; extern int revalidate_scsidisk(kdev_t dev, int maxusage); 
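A quick check of the revised size reporting in the sd.c hunk above: sz now
counts 512-byte units in an unsigned int, and

	(sz - sz/625 + 974) / 1950  ~=  sz * (624/625) / 1950  =  sz * 512 / 10^6

so the printed figure is the capacity in 10^6-byte megabytes, with the +974
term rounding to the nearest unit.  Keeping sz in 512-byte units (instead of
256-byte units in a signed int) also moves the point of overflow from about
2^31 * 256 bytes (~0.5 TB) to 2^32 * 512 bytes (~2.2 TB).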
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/scsi/sim710_d.h linux.21pre4-ac2/drivers/scsi/sim710_d.h --- linux.21pre4/drivers/scsi/sim710_d.h 2003-01-29 16:26:50.000000000 +0000 +++ linux.21pre4-ac2/drivers/scsi/sim710_d.h 2003-01-06 15:38:23.000000000 +0000 @@ -18,15 +18,12 @@ ABSOLUTE reselected_identify = 0 ABSOLUTE msgin_buf = 0 +ABSOLUTE msg_reject = 0 +ABSOLUTE test1_src = 0 +ABSOLUTE test1_dst = 0 -ABSOLUTE int_bad_extmsg1a = 0xab930000 -ABSOLUTE int_bad_extmsg1b = 0xab930001 -ABSOLUTE int_bad_extmsg2a = 0xab930002 -ABSOLUTE int_bad_extmsg2b = 0xab930003 -ABSOLUTE int_bad_extmsg3a = 0xab930004 -ABSOLUTE int_bad_extmsg3b = 0xab930005 ABSOLUTE int_bad_msg1 = 0xab930006 ABSOLUTE int_bad_msg2 = 0xab930007 ABSOLUTE int_bad_msg3 = 0xab930008 @@ -50,7 +47,7 @@ ABSOLUTE int_disc2 = 0xab93001a ABSOLUTE int_disc3 = 0xab93001b ABSOLUTE int_not_rej = 0xab93001c - +ABSOLUTE int_test1 = 0xab93001d @@ -65,6 +62,9 @@ +ABSOLUTE did_reject = 0x01 + + @@ -74,1641 +74,1709 @@ at 0x00000000 : */ 0x60000200,0x00000000, /* - MOVE SCRATCH0 & 0 TO SCRATCH0 - -at 0x00000002 : */ 0x7c340000,0x00000000, -/* ; Enable selection timer MOVE CTEST7 & 0xef TO CTEST7 -at 0x00000004 : */ 0x7c1bef00,0x00000000, +at 0x00000002 : */ 0x7c1bef00,0x00000000, /* SELECT ATN FROM dsa_select, reselect -at 0x00000006 : */ 0x43000000,0x00000c48, +at 0x00000004 : */ 0x43000000,0x00000cd0, /* JUMP get_status, WHEN STATUS -at 0x00000008 : */ 0x830b0000,0x000000a0, +at 0x00000006 : */ 0x830b0000,0x00000098, /* ; Disable selection timer MOVE CTEST7 | 0x10 TO CTEST7 -at 0x0000000a : */ 0x7a1b1000,0x00000000, +at 0x00000008 : */ 0x7a1b1000,0x00000000, /* MOVE SCRATCH0 | had_select TO SCRATCH0 -at 0x0000000c : */ 0x7a340100,0x00000000, +at 0x0000000a : */ 0x7a340100,0x00000000, /* INT int_sel_no_ident, IF NOT MSG_OUT -at 0x0000000e : */ 0x9e020000,0xab930013, +at 0x0000000c : */ 0x9e020000,0xab930013, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x00000010 : */ 0x7a340200,0x00000000, +at 0x0000000e : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x00000012 : */ 0x1e000000,0x00000008, +at 0x00000010 : */ 0x1e000000,0x00000008, /* ENTRY done_ident done_ident: JUMP get_status, IF STATUS -at 0x00000014 : */ 0x830a0000,0x000000a0, +at 0x00000012 : */ 0x830a0000,0x00000098, /* redo_msgin1: JUMP get_msgin1, WHEN MSG_IN -at 0x00000016 : */ 0x870b0000,0x00000920, +at 0x00000014 : */ 0x870b0000,0x00000918, /* INT int_sel_not_cmd, IF NOT CMD -at 0x00000018 : */ 0x9a020000,0xab930014, +at 0x00000016 : */ 0x9a020000,0xab930014, /* ENTRY resume_cmd resume_cmd: MOVE SCRATCH0 | had_cmdout TO SCRATCH0 -at 0x0000001a : */ 0x7a340400,0x00000000, +at 0x00000018 : */ 0x7a340400,0x00000000, /* MOVE FROM dsa_cmnd, WHEN CMD -at 0x0000001c : */ 0x1a000000,0x00000010, +at 0x0000001a : */ 0x1a000000,0x00000010, /* ENTRY resume_pmm resume_pmm: redo_msgin2: JUMP get_msgin2, WHEN MSG_IN -at 0x0000001e : */ 0x870b0000,0x00000a20, +at 0x0000001c : */ 0x870b0000,0x00000a48, /* JUMP get_status, IF STATUS -at 0x00000020 : */ 0x830a0000,0x000000a0, +at 0x0000001e : */ 0x830a0000,0x00000098, /* JUMP input_data, IF DATA_IN -at 0x00000022 : */ 0x810a0000,0x000000e0, +at 0x00000020 : */ 0x810a0000,0x000000d8, /* JUMP output_data, IF DATA_OUT -at 0x00000024 : */ 0x800a0000,0x000004f8, +at 0x00000022 : */ 0x800a0000,0x000004f0, /* INT int_cmd_bad_phase -at 0x00000026 : */ 0x98080000,0xab930009, +at 0x00000024 : */ 0x98080000,0xab930009, /* get_status: ; Disable selection timer MOVE CTEST7 | 0x10 TO CTEST7 -at 
0x00000028 : */ 0x7a1b1000,0x00000000, +at 0x00000026 : */ 0x7a1b1000,0x00000000, /* MOVE FROM dsa_status, WHEN STATUS -at 0x0000002a : */ 0x1b000000,0x00000018, +at 0x00000028 : */ 0x1b000000,0x00000018, /* INT int_status_not_msgin, WHEN NOT MSG_IN -at 0x0000002c : */ 0x9f030000,0xab930015, +at 0x0000002a : */ 0x9f030000,0xab930015, /* MOVE FROM dsa_msgin, WHEN MSG_IN -at 0x0000002e : */ 0x1f000000,0x00000020, +at 0x0000002c : */ 0x1f000000,0x00000020, /* INT int_not_cmd_complete, IF NOT 0x00 -at 0x00000030 : */ 0x98040000,0xab930012, +at 0x0000002e : */ 0x98040000,0xab930012, /* CLEAR ACK -at 0x00000032 : */ 0x60000040,0x00000000, +at 0x00000030 : */ 0x60000040,0x00000000, /* ENTRY wait_disc_complete wait_disc_complete: WAIT DISCONNECT -at 0x00000034 : */ 0x48000000,0x00000000, +at 0x00000032 : */ 0x48000000,0x00000000, /* INT int_cmd_complete -at 0x00000036 : */ 0x98080000,0xab93000a, +at 0x00000034 : */ 0x98080000,0xab93000a, /* input_data: MOVE SCRATCH0 | had_datain TO SCRATCH0 -at 0x00000038 : */ 0x7a340800,0x00000000, +at 0x00000036 : */ 0x7a340800,0x00000000, /* ENTRY patch_input_data patch_input_data: JUMP 0 -at 0x0000003a : */ 0x80080000,0x00000000, +at 0x00000038 : */ 0x80080000,0x00000000, /* MOVE FROM dsa_datain+0x0000, WHEN DATA_IN -at 0x0000003c : */ 0x19000000,0x00000028, +at 0x0000003a : */ 0x19000000,0x00000028, /* MOVE FROM dsa_datain+0x0008, WHEN DATA_IN -at 0x0000003e : */ 0x19000000,0x00000030, +at 0x0000003c : */ 0x19000000,0x00000030, /* MOVE FROM dsa_datain+0x0010, WHEN DATA_IN -at 0x00000040 : */ 0x19000000,0x00000038, +at 0x0000003e : */ 0x19000000,0x00000038, /* MOVE FROM dsa_datain+0x0018, WHEN DATA_IN -at 0x00000042 : */ 0x19000000,0x00000040, +at 0x00000040 : */ 0x19000000,0x00000040, /* MOVE FROM dsa_datain+0x0020, WHEN DATA_IN -at 0x00000044 : */ 0x19000000,0x00000048, +at 0x00000042 : */ 0x19000000,0x00000048, /* MOVE FROM dsa_datain+0x0028, WHEN DATA_IN -at 0x00000046 : */ 0x19000000,0x00000050, +at 0x00000044 : */ 0x19000000,0x00000050, /* MOVE FROM dsa_datain+0x0030, WHEN DATA_IN -at 0x00000048 : */ 0x19000000,0x00000058, +at 0x00000046 : */ 0x19000000,0x00000058, /* MOVE FROM dsa_datain+0x0038, WHEN DATA_IN -at 0x0000004a : */ 0x19000000,0x00000060, +at 0x00000048 : */ 0x19000000,0x00000060, /* MOVE FROM dsa_datain+0x0040, WHEN DATA_IN -at 0x0000004c : */ 0x19000000,0x00000068, +at 0x0000004a : */ 0x19000000,0x00000068, /* MOVE FROM dsa_datain+0x0048, WHEN DATA_IN -at 0x0000004e : */ 0x19000000,0x00000070, +at 0x0000004c : */ 0x19000000,0x00000070, /* MOVE FROM dsa_datain+0x0050, WHEN DATA_IN -at 0x00000050 : */ 0x19000000,0x00000078, +at 0x0000004e : */ 0x19000000,0x00000078, /* MOVE FROM dsa_datain+0x0058, WHEN DATA_IN -at 0x00000052 : */ 0x19000000,0x00000080, +at 0x00000050 : */ 0x19000000,0x00000080, /* MOVE FROM dsa_datain+0x0060, WHEN DATA_IN -at 0x00000054 : */ 0x19000000,0x00000088, +at 0x00000052 : */ 0x19000000,0x00000088, /* MOVE FROM dsa_datain+0x0068, WHEN DATA_IN -at 0x00000056 : */ 0x19000000,0x00000090, +at 0x00000054 : */ 0x19000000,0x00000090, /* MOVE FROM dsa_datain+0x0070, WHEN DATA_IN -at 0x00000058 : */ 0x19000000,0x00000098, +at 0x00000056 : */ 0x19000000,0x00000098, /* MOVE FROM dsa_datain+0x0078, WHEN DATA_IN -at 0x0000005a : */ 0x19000000,0x000000a0, +at 0x00000058 : */ 0x19000000,0x000000a0, /* MOVE FROM dsa_datain+0x0080, WHEN DATA_IN -at 0x0000005c : */ 0x19000000,0x000000a8, +at 0x0000005a : */ 0x19000000,0x000000a8, /* MOVE FROM dsa_datain+0x0088, WHEN DATA_IN -at 0x0000005e : */ 0x19000000,0x000000b0, +at 0x0000005c : 
*/ 0x19000000,0x000000b0, /* MOVE FROM dsa_datain+0x0090, WHEN DATA_IN -at 0x00000060 : */ 0x19000000,0x000000b8, +at 0x0000005e : */ 0x19000000,0x000000b8, /* MOVE FROM dsa_datain+0x0098, WHEN DATA_IN -at 0x00000062 : */ 0x19000000,0x000000c0, +at 0x00000060 : */ 0x19000000,0x000000c0, /* MOVE FROM dsa_datain+0x00a0, WHEN DATA_IN -at 0x00000064 : */ 0x19000000,0x000000c8, +at 0x00000062 : */ 0x19000000,0x000000c8, /* MOVE FROM dsa_datain+0x00a8, WHEN DATA_IN -at 0x00000066 : */ 0x19000000,0x000000d0, +at 0x00000064 : */ 0x19000000,0x000000d0, /* MOVE FROM dsa_datain+0x00b0, WHEN DATA_IN -at 0x00000068 : */ 0x19000000,0x000000d8, +at 0x00000066 : */ 0x19000000,0x000000d8, /* MOVE FROM dsa_datain+0x00b8, WHEN DATA_IN -at 0x0000006a : */ 0x19000000,0x000000e0, +at 0x00000068 : */ 0x19000000,0x000000e0, /* MOVE FROM dsa_datain+0x00c0, WHEN DATA_IN -at 0x0000006c : */ 0x19000000,0x000000e8, +at 0x0000006a : */ 0x19000000,0x000000e8, /* MOVE FROM dsa_datain+0x00c8, WHEN DATA_IN -at 0x0000006e : */ 0x19000000,0x000000f0, +at 0x0000006c : */ 0x19000000,0x000000f0, /* MOVE FROM dsa_datain+0x00d0, WHEN DATA_IN -at 0x00000070 : */ 0x19000000,0x000000f8, +at 0x0000006e : */ 0x19000000,0x000000f8, /* MOVE FROM dsa_datain+0x00d8, WHEN DATA_IN -at 0x00000072 : */ 0x19000000,0x00000100, +at 0x00000070 : */ 0x19000000,0x00000100, /* MOVE FROM dsa_datain+0x00e0, WHEN DATA_IN -at 0x00000074 : */ 0x19000000,0x00000108, +at 0x00000072 : */ 0x19000000,0x00000108, /* MOVE FROM dsa_datain+0x00e8, WHEN DATA_IN -at 0x00000076 : */ 0x19000000,0x00000110, +at 0x00000074 : */ 0x19000000,0x00000110, /* MOVE FROM dsa_datain+0x00f0, WHEN DATA_IN -at 0x00000078 : */ 0x19000000,0x00000118, +at 0x00000076 : */ 0x19000000,0x00000118, /* MOVE FROM dsa_datain+0x00f8, WHEN DATA_IN -at 0x0000007a : */ 0x19000000,0x00000120, +at 0x00000078 : */ 0x19000000,0x00000120, /* MOVE FROM dsa_datain+0x0100, WHEN DATA_IN -at 0x0000007c : */ 0x19000000,0x00000128, +at 0x0000007a : */ 0x19000000,0x00000128, /* MOVE FROM dsa_datain+0x0108, WHEN DATA_IN -at 0x0000007e : */ 0x19000000,0x00000130, +at 0x0000007c : */ 0x19000000,0x00000130, /* MOVE FROM dsa_datain+0x0110, WHEN DATA_IN -at 0x00000080 : */ 0x19000000,0x00000138, +at 0x0000007e : */ 0x19000000,0x00000138, /* MOVE FROM dsa_datain+0x0118, WHEN DATA_IN -at 0x00000082 : */ 0x19000000,0x00000140, +at 0x00000080 : */ 0x19000000,0x00000140, /* MOVE FROM dsa_datain+0x0120, WHEN DATA_IN -at 0x00000084 : */ 0x19000000,0x00000148, +at 0x00000082 : */ 0x19000000,0x00000148, /* MOVE FROM dsa_datain+0x0128, WHEN DATA_IN -at 0x00000086 : */ 0x19000000,0x00000150, +at 0x00000084 : */ 0x19000000,0x00000150, /* MOVE FROM dsa_datain+0x0130, WHEN DATA_IN -at 0x00000088 : */ 0x19000000,0x00000158, +at 0x00000086 : */ 0x19000000,0x00000158, /* MOVE FROM dsa_datain+0x0138, WHEN DATA_IN -at 0x0000008a : */ 0x19000000,0x00000160, +at 0x00000088 : */ 0x19000000,0x00000160, /* MOVE FROM dsa_datain+0x0140, WHEN DATA_IN -at 0x0000008c : */ 0x19000000,0x00000168, +at 0x0000008a : */ 0x19000000,0x00000168, /* MOVE FROM dsa_datain+0x0148, WHEN DATA_IN -at 0x0000008e : */ 0x19000000,0x00000170, +at 0x0000008c : */ 0x19000000,0x00000170, /* MOVE FROM dsa_datain+0x0150, WHEN DATA_IN -at 0x00000090 : */ 0x19000000,0x00000178, +at 0x0000008e : */ 0x19000000,0x00000178, /* MOVE FROM dsa_datain+0x0158, WHEN DATA_IN -at 0x00000092 : */ 0x19000000,0x00000180, +at 0x00000090 : */ 0x19000000,0x00000180, /* MOVE FROM dsa_datain+0x0160, WHEN DATA_IN -at 0x00000094 : */ 0x19000000,0x00000188, +at 0x00000092 : */ 
0x19000000,0x00000188, /* MOVE FROM dsa_datain+0x0168, WHEN DATA_IN -at 0x00000096 : */ 0x19000000,0x00000190, +at 0x00000094 : */ 0x19000000,0x00000190, /* MOVE FROM dsa_datain+0x0170, WHEN DATA_IN -at 0x00000098 : */ 0x19000000,0x00000198, +at 0x00000096 : */ 0x19000000,0x00000198, /* MOVE FROM dsa_datain+0x0178, WHEN DATA_IN -at 0x0000009a : */ 0x19000000,0x000001a0, +at 0x00000098 : */ 0x19000000,0x000001a0, /* MOVE FROM dsa_datain+0x0180, WHEN DATA_IN -at 0x0000009c : */ 0x19000000,0x000001a8, +at 0x0000009a : */ 0x19000000,0x000001a8, /* MOVE FROM dsa_datain+0x0188, WHEN DATA_IN -at 0x0000009e : */ 0x19000000,0x000001b0, +at 0x0000009c : */ 0x19000000,0x000001b0, /* MOVE FROM dsa_datain+0x0190, WHEN DATA_IN -at 0x000000a0 : */ 0x19000000,0x000001b8, +at 0x0000009e : */ 0x19000000,0x000001b8, /* MOVE FROM dsa_datain+0x0198, WHEN DATA_IN -at 0x000000a2 : */ 0x19000000,0x000001c0, +at 0x000000a0 : */ 0x19000000,0x000001c0, /* MOVE FROM dsa_datain+0x01a0, WHEN DATA_IN -at 0x000000a4 : */ 0x19000000,0x000001c8, +at 0x000000a2 : */ 0x19000000,0x000001c8, /* MOVE FROM dsa_datain+0x01a8, WHEN DATA_IN -at 0x000000a6 : */ 0x19000000,0x000001d0, +at 0x000000a4 : */ 0x19000000,0x000001d0, /* MOVE FROM dsa_datain+0x01b0, WHEN DATA_IN -at 0x000000a8 : */ 0x19000000,0x000001d8, +at 0x000000a6 : */ 0x19000000,0x000001d8, /* MOVE FROM dsa_datain+0x01b8, WHEN DATA_IN -at 0x000000aa : */ 0x19000000,0x000001e0, +at 0x000000a8 : */ 0x19000000,0x000001e0, /* MOVE FROM dsa_datain+0x01c0, WHEN DATA_IN -at 0x000000ac : */ 0x19000000,0x000001e8, +at 0x000000aa : */ 0x19000000,0x000001e8, /* MOVE FROM dsa_datain+0x01c8, WHEN DATA_IN -at 0x000000ae : */ 0x19000000,0x000001f0, +at 0x000000ac : */ 0x19000000,0x000001f0, /* MOVE FROM dsa_datain+0x01d0, WHEN DATA_IN -at 0x000000b0 : */ 0x19000000,0x000001f8, +at 0x000000ae : */ 0x19000000,0x000001f8, /* MOVE FROM dsa_datain+0x01d8, WHEN DATA_IN -at 0x000000b2 : */ 0x19000000,0x00000200, +at 0x000000b0 : */ 0x19000000,0x00000200, /* MOVE FROM dsa_datain+0x01e0, WHEN DATA_IN -at 0x000000b4 : */ 0x19000000,0x00000208, +at 0x000000b2 : */ 0x19000000,0x00000208, /* MOVE FROM dsa_datain+0x01e8, WHEN DATA_IN -at 0x000000b6 : */ 0x19000000,0x00000210, +at 0x000000b4 : */ 0x19000000,0x00000210, /* MOVE FROM dsa_datain+0x01f0, WHEN DATA_IN -at 0x000000b8 : */ 0x19000000,0x00000218, +at 0x000000b6 : */ 0x19000000,0x00000218, /* MOVE FROM dsa_datain+0x01f8, WHEN DATA_IN -at 0x000000ba : */ 0x19000000,0x00000220, +at 0x000000b8 : */ 0x19000000,0x00000220, /* MOVE FROM dsa_datain+0x0200, WHEN DATA_IN -at 0x000000bc : */ 0x19000000,0x00000228, +at 0x000000ba : */ 0x19000000,0x00000228, /* MOVE FROM dsa_datain+0x0208, WHEN DATA_IN -at 0x000000be : */ 0x19000000,0x00000230, +at 0x000000bc : */ 0x19000000,0x00000230, /* MOVE FROM dsa_datain+0x0210, WHEN DATA_IN -at 0x000000c0 : */ 0x19000000,0x00000238, +at 0x000000be : */ 0x19000000,0x00000238, /* MOVE FROM dsa_datain+0x0218, WHEN DATA_IN -at 0x000000c2 : */ 0x19000000,0x00000240, +at 0x000000c0 : */ 0x19000000,0x00000240, /* MOVE FROM dsa_datain+0x0220, WHEN DATA_IN -at 0x000000c4 : */ 0x19000000,0x00000248, +at 0x000000c2 : */ 0x19000000,0x00000248, /* MOVE FROM dsa_datain+0x0228, WHEN DATA_IN -at 0x000000c6 : */ 0x19000000,0x00000250, +at 0x000000c4 : */ 0x19000000,0x00000250, /* MOVE FROM dsa_datain+0x0230, WHEN DATA_IN -at 0x000000c8 : */ 0x19000000,0x00000258, +at 0x000000c6 : */ 0x19000000,0x00000258, /* MOVE FROM dsa_datain+0x0238, WHEN DATA_IN -at 0x000000ca : */ 0x19000000,0x00000260, +at 0x000000c8 : */ 
0x19000000,0x00000260, /* MOVE FROM dsa_datain+0x0240, WHEN DATA_IN -at 0x000000cc : */ 0x19000000,0x00000268, +at 0x000000ca : */ 0x19000000,0x00000268, /* MOVE FROM dsa_datain+0x0248, WHEN DATA_IN -at 0x000000ce : */ 0x19000000,0x00000270, +at 0x000000cc : */ 0x19000000,0x00000270, /* MOVE FROM dsa_datain+0x0250, WHEN DATA_IN -at 0x000000d0 : */ 0x19000000,0x00000278, +at 0x000000ce : */ 0x19000000,0x00000278, /* MOVE FROM dsa_datain+0x0258, WHEN DATA_IN -at 0x000000d2 : */ 0x19000000,0x00000280, +at 0x000000d0 : */ 0x19000000,0x00000280, /* MOVE FROM dsa_datain+0x0260, WHEN DATA_IN -at 0x000000d4 : */ 0x19000000,0x00000288, +at 0x000000d2 : */ 0x19000000,0x00000288, /* MOVE FROM dsa_datain+0x0268, WHEN DATA_IN -at 0x000000d6 : */ 0x19000000,0x00000290, +at 0x000000d4 : */ 0x19000000,0x00000290, /* MOVE FROM dsa_datain+0x0270, WHEN DATA_IN -at 0x000000d8 : */ 0x19000000,0x00000298, +at 0x000000d6 : */ 0x19000000,0x00000298, /* MOVE FROM dsa_datain+0x0278, WHEN DATA_IN -at 0x000000da : */ 0x19000000,0x000002a0, +at 0x000000d8 : */ 0x19000000,0x000002a0, /* MOVE FROM dsa_datain+0x0280, WHEN DATA_IN -at 0x000000dc : */ 0x19000000,0x000002a8, +at 0x000000da : */ 0x19000000,0x000002a8, /* MOVE FROM dsa_datain+0x0288, WHEN DATA_IN -at 0x000000de : */ 0x19000000,0x000002b0, +at 0x000000dc : */ 0x19000000,0x000002b0, /* MOVE FROM dsa_datain+0x0290, WHEN DATA_IN -at 0x000000e0 : */ 0x19000000,0x000002b8, +at 0x000000de : */ 0x19000000,0x000002b8, /* MOVE FROM dsa_datain+0x0298, WHEN DATA_IN -at 0x000000e2 : */ 0x19000000,0x000002c0, +at 0x000000e0 : */ 0x19000000,0x000002c0, /* MOVE FROM dsa_datain+0x02a0, WHEN DATA_IN -at 0x000000e4 : */ 0x19000000,0x000002c8, +at 0x000000e2 : */ 0x19000000,0x000002c8, /* MOVE FROM dsa_datain+0x02a8, WHEN DATA_IN -at 0x000000e6 : */ 0x19000000,0x000002d0, +at 0x000000e4 : */ 0x19000000,0x000002d0, /* MOVE FROM dsa_datain+0x02b0, WHEN DATA_IN -at 0x000000e8 : */ 0x19000000,0x000002d8, +at 0x000000e6 : */ 0x19000000,0x000002d8, /* MOVE FROM dsa_datain+0x02b8, WHEN DATA_IN -at 0x000000ea : */ 0x19000000,0x000002e0, +at 0x000000e8 : */ 0x19000000,0x000002e0, /* MOVE FROM dsa_datain+0x02c0, WHEN DATA_IN -at 0x000000ec : */ 0x19000000,0x000002e8, +at 0x000000ea : */ 0x19000000,0x000002e8, /* MOVE FROM dsa_datain+0x02c8, WHEN DATA_IN -at 0x000000ee : */ 0x19000000,0x000002f0, +at 0x000000ec : */ 0x19000000,0x000002f0, /* MOVE FROM dsa_datain+0x02d0, WHEN DATA_IN -at 0x000000f0 : */ 0x19000000,0x000002f8, +at 0x000000ee : */ 0x19000000,0x000002f8, /* MOVE FROM dsa_datain+0x02d8, WHEN DATA_IN -at 0x000000f2 : */ 0x19000000,0x00000300, +at 0x000000f0 : */ 0x19000000,0x00000300, /* MOVE FROM dsa_datain+0x02e0, WHEN DATA_IN -at 0x000000f4 : */ 0x19000000,0x00000308, +at 0x000000f2 : */ 0x19000000,0x00000308, /* MOVE FROM dsa_datain+0x02e8, WHEN DATA_IN -at 0x000000f6 : */ 0x19000000,0x00000310, +at 0x000000f4 : */ 0x19000000,0x00000310, /* MOVE FROM dsa_datain+0x02f0, WHEN DATA_IN -at 0x000000f8 : */ 0x19000000,0x00000318, +at 0x000000f6 : */ 0x19000000,0x00000318, /* MOVE FROM dsa_datain+0x02f8, WHEN DATA_IN -at 0x000000fa : */ 0x19000000,0x00000320, +at 0x000000f8 : */ 0x19000000,0x00000320, /* MOVE FROM dsa_datain+0x0300, WHEN DATA_IN -at 0x000000fc : */ 0x19000000,0x00000328, +at 0x000000fa : */ 0x19000000,0x00000328, /* MOVE FROM dsa_datain+0x0308, WHEN DATA_IN -at 0x000000fe : */ 0x19000000,0x00000330, +at 0x000000fc : */ 0x19000000,0x00000330, /* MOVE FROM dsa_datain+0x0310, WHEN DATA_IN -at 0x00000100 : */ 0x19000000,0x00000338, +at 0x000000fe : */ 
0x19000000,0x00000338, /* MOVE FROM dsa_datain+0x0318, WHEN DATA_IN -at 0x00000102 : */ 0x19000000,0x00000340, +at 0x00000100 : */ 0x19000000,0x00000340, /* MOVE FROM dsa_datain+0x0320, WHEN DATA_IN -at 0x00000104 : */ 0x19000000,0x00000348, +at 0x00000102 : */ 0x19000000,0x00000348, /* MOVE FROM dsa_datain+0x0328, WHEN DATA_IN -at 0x00000106 : */ 0x19000000,0x00000350, +at 0x00000104 : */ 0x19000000,0x00000350, /* MOVE FROM dsa_datain+0x0330, WHEN DATA_IN -at 0x00000108 : */ 0x19000000,0x00000358, +at 0x00000106 : */ 0x19000000,0x00000358, /* MOVE FROM dsa_datain+0x0338, WHEN DATA_IN -at 0x0000010a : */ 0x19000000,0x00000360, +at 0x00000108 : */ 0x19000000,0x00000360, /* MOVE FROM dsa_datain+0x0340, WHEN DATA_IN -at 0x0000010c : */ 0x19000000,0x00000368, +at 0x0000010a : */ 0x19000000,0x00000368, /* MOVE FROM dsa_datain+0x0348, WHEN DATA_IN -at 0x0000010e : */ 0x19000000,0x00000370, +at 0x0000010c : */ 0x19000000,0x00000370, /* MOVE FROM dsa_datain+0x0350, WHEN DATA_IN -at 0x00000110 : */ 0x19000000,0x00000378, +at 0x0000010e : */ 0x19000000,0x00000378, /* MOVE FROM dsa_datain+0x0358, WHEN DATA_IN -at 0x00000112 : */ 0x19000000,0x00000380, +at 0x00000110 : */ 0x19000000,0x00000380, /* MOVE FROM dsa_datain+0x0360, WHEN DATA_IN -at 0x00000114 : */ 0x19000000,0x00000388, +at 0x00000112 : */ 0x19000000,0x00000388, /* MOVE FROM dsa_datain+0x0368, WHEN DATA_IN -at 0x00000116 : */ 0x19000000,0x00000390, +at 0x00000114 : */ 0x19000000,0x00000390, /* MOVE FROM dsa_datain+0x0370, WHEN DATA_IN -at 0x00000118 : */ 0x19000000,0x00000398, +at 0x00000116 : */ 0x19000000,0x00000398, /* MOVE FROM dsa_datain+0x0378, WHEN DATA_IN -at 0x0000011a : */ 0x19000000,0x000003a0, +at 0x00000118 : */ 0x19000000,0x000003a0, /* MOVE FROM dsa_datain+0x0380, WHEN DATA_IN -at 0x0000011c : */ 0x19000000,0x000003a8, +at 0x0000011a : */ 0x19000000,0x000003a8, /* MOVE FROM dsa_datain+0x0388, WHEN DATA_IN -at 0x0000011e : */ 0x19000000,0x000003b0, +at 0x0000011c : */ 0x19000000,0x000003b0, /* MOVE FROM dsa_datain+0x0390, WHEN DATA_IN -at 0x00000120 : */ 0x19000000,0x000003b8, +at 0x0000011e : */ 0x19000000,0x000003b8, /* MOVE FROM dsa_datain+0x0398, WHEN DATA_IN -at 0x00000122 : */ 0x19000000,0x000003c0, +at 0x00000120 : */ 0x19000000,0x000003c0, /* MOVE FROM dsa_datain+0x03a0, WHEN DATA_IN -at 0x00000124 : */ 0x19000000,0x000003c8, +at 0x00000122 : */ 0x19000000,0x000003c8, /* MOVE FROM dsa_datain+0x03a8, WHEN DATA_IN -at 0x00000126 : */ 0x19000000,0x000003d0, +at 0x00000124 : */ 0x19000000,0x000003d0, /* MOVE FROM dsa_datain+0x03b0, WHEN DATA_IN -at 0x00000128 : */ 0x19000000,0x000003d8, +at 0x00000126 : */ 0x19000000,0x000003d8, /* MOVE FROM dsa_datain+0x03b8, WHEN DATA_IN -at 0x0000012a : */ 0x19000000,0x000003e0, +at 0x00000128 : */ 0x19000000,0x000003e0, /* MOVE FROM dsa_datain+0x03c0, WHEN DATA_IN -at 0x0000012c : */ 0x19000000,0x000003e8, +at 0x0000012a : */ 0x19000000,0x000003e8, /* MOVE FROM dsa_datain+0x03c8, WHEN DATA_IN -at 0x0000012e : */ 0x19000000,0x000003f0, +at 0x0000012c : */ 0x19000000,0x000003f0, /* MOVE FROM dsa_datain+0x03d0, WHEN DATA_IN -at 0x00000130 : */ 0x19000000,0x000003f8, +at 0x0000012e : */ 0x19000000,0x000003f8, /* MOVE FROM dsa_datain+0x03d8, WHEN DATA_IN -at 0x00000132 : */ 0x19000000,0x00000400, +at 0x00000130 : */ 0x19000000,0x00000400, /* MOVE FROM dsa_datain+0x03e0, WHEN DATA_IN -at 0x00000134 : */ 0x19000000,0x00000408, +at 0x00000132 : */ 0x19000000,0x00000408, /* MOVE FROM dsa_datain+0x03e8, WHEN DATA_IN -at 0x00000136 : */ 0x19000000,0x00000410, +at 0x00000134 : */ 
0x19000000,0x00000410, /* MOVE FROM dsa_datain+0x03f0, WHEN DATA_IN -at 0x00000138 : */ 0x19000000,0x00000418, +at 0x00000136 : */ 0x19000000,0x00000418, /* MOVE FROM dsa_datain+0x03f8, WHEN DATA_IN -at 0x0000013a : */ 0x19000000,0x00000420, +at 0x00000138 : */ 0x19000000,0x00000420, /* JUMP end_data_trans -at 0x0000013c : */ 0x80080000,0x00000908, +at 0x0000013a : */ 0x80080000,0x00000900, /* output_data: MOVE SCRATCH0 | had_dataout TO SCRATCH0 -at 0x0000013e : */ 0x7a341000,0x00000000, +at 0x0000013c : */ 0x7a341000,0x00000000, /* ENTRY patch_output_data patch_output_data: JUMP 0 -at 0x00000140 : */ 0x80080000,0x00000000, +at 0x0000013e : */ 0x80080000,0x00000000, /* MOVE FROM dsa_dataout+0x0000, WHEN DATA_OUT -at 0x00000142 : */ 0x18000000,0x00000428, +at 0x00000140 : */ 0x18000000,0x00000428, /* MOVE FROM dsa_dataout+0x0008, WHEN DATA_OUT -at 0x00000144 : */ 0x18000000,0x00000430, +at 0x00000142 : */ 0x18000000,0x00000430, /* MOVE FROM dsa_dataout+0x0010, WHEN DATA_OUT -at 0x00000146 : */ 0x18000000,0x00000438, +at 0x00000144 : */ 0x18000000,0x00000438, /* MOVE FROM dsa_dataout+0x0018, WHEN DATA_OUT -at 0x00000148 : */ 0x18000000,0x00000440, +at 0x00000146 : */ 0x18000000,0x00000440, /* MOVE FROM dsa_dataout+0x0020, WHEN DATA_OUT -at 0x0000014a : */ 0x18000000,0x00000448, +at 0x00000148 : */ 0x18000000,0x00000448, /* MOVE FROM dsa_dataout+0x0028, WHEN DATA_OUT -at 0x0000014c : */ 0x18000000,0x00000450, +at 0x0000014a : */ 0x18000000,0x00000450, /* MOVE FROM dsa_dataout+0x0030, WHEN DATA_OUT -at 0x0000014e : */ 0x18000000,0x00000458, +at 0x0000014c : */ 0x18000000,0x00000458, /* MOVE FROM dsa_dataout+0x0038, WHEN DATA_OUT -at 0x00000150 : */ 0x18000000,0x00000460, +at 0x0000014e : */ 0x18000000,0x00000460, /* MOVE FROM dsa_dataout+0x0040, WHEN DATA_OUT -at 0x00000152 : */ 0x18000000,0x00000468, +at 0x00000150 : */ 0x18000000,0x00000468, /* MOVE FROM dsa_dataout+0x0048, WHEN DATA_OUT -at 0x00000154 : */ 0x18000000,0x00000470, +at 0x00000152 : */ 0x18000000,0x00000470, /* MOVE FROM dsa_dataout+0x0050, WHEN DATA_OUT -at 0x00000156 : */ 0x18000000,0x00000478, +at 0x00000154 : */ 0x18000000,0x00000478, /* MOVE FROM dsa_dataout+0x0058, WHEN DATA_OUT -at 0x00000158 : */ 0x18000000,0x00000480, +at 0x00000156 : */ 0x18000000,0x00000480, /* MOVE FROM dsa_dataout+0x0060, WHEN DATA_OUT -at 0x0000015a : */ 0x18000000,0x00000488, +at 0x00000158 : */ 0x18000000,0x00000488, /* MOVE FROM dsa_dataout+0x0068, WHEN DATA_OUT -at 0x0000015c : */ 0x18000000,0x00000490, +at 0x0000015a : */ 0x18000000,0x00000490, /* MOVE FROM dsa_dataout+0x0070, WHEN DATA_OUT -at 0x0000015e : */ 0x18000000,0x00000498, +at 0x0000015c : */ 0x18000000,0x00000498, /* MOVE FROM dsa_dataout+0x0078, WHEN DATA_OUT -at 0x00000160 : */ 0x18000000,0x000004a0, +at 0x0000015e : */ 0x18000000,0x000004a0, /* MOVE FROM dsa_dataout+0x0080, WHEN DATA_OUT -at 0x00000162 : */ 0x18000000,0x000004a8, +at 0x00000160 : */ 0x18000000,0x000004a8, /* MOVE FROM dsa_dataout+0x0088, WHEN DATA_OUT -at 0x00000164 : */ 0x18000000,0x000004b0, +at 0x00000162 : */ 0x18000000,0x000004b0, /* MOVE FROM dsa_dataout+0x0090, WHEN DATA_OUT -at 0x00000166 : */ 0x18000000,0x000004b8, +at 0x00000164 : */ 0x18000000,0x000004b8, /* MOVE FROM dsa_dataout+0x0098, WHEN DATA_OUT -at 0x00000168 : */ 0x18000000,0x000004c0, +at 0x00000166 : */ 0x18000000,0x000004c0, /* MOVE FROM dsa_dataout+0x00a0, WHEN DATA_OUT -at 0x0000016a : */ 0x18000000,0x000004c8, +at 0x00000168 : */ 0x18000000,0x000004c8, /* MOVE FROM dsa_dataout+0x00a8, WHEN DATA_OUT -at 0x0000016c : */ 
0x18000000,0x000004d0, +at 0x0000016a : */ 0x18000000,0x000004d0, /* MOVE FROM dsa_dataout+0x00b0, WHEN DATA_OUT -at 0x0000016e : */ 0x18000000,0x000004d8, +at 0x0000016c : */ 0x18000000,0x000004d8, /* MOVE FROM dsa_dataout+0x00b8, WHEN DATA_OUT -at 0x00000170 : */ 0x18000000,0x000004e0, +at 0x0000016e : */ 0x18000000,0x000004e0, /* MOVE FROM dsa_dataout+0x00c0, WHEN DATA_OUT -at 0x00000172 : */ 0x18000000,0x000004e8, +at 0x00000170 : */ 0x18000000,0x000004e8, /* MOVE FROM dsa_dataout+0x00c8, WHEN DATA_OUT -at 0x00000174 : */ 0x18000000,0x000004f0, +at 0x00000172 : */ 0x18000000,0x000004f0, /* MOVE FROM dsa_dataout+0x00d0, WHEN DATA_OUT -at 0x00000176 : */ 0x18000000,0x000004f8, +at 0x00000174 : */ 0x18000000,0x000004f8, /* MOVE FROM dsa_dataout+0x00d8, WHEN DATA_OUT -at 0x00000178 : */ 0x18000000,0x00000500, +at 0x00000176 : */ 0x18000000,0x00000500, /* MOVE FROM dsa_dataout+0x00e0, WHEN DATA_OUT -at 0x0000017a : */ 0x18000000,0x00000508, +at 0x00000178 : */ 0x18000000,0x00000508, /* MOVE FROM dsa_dataout+0x00e8, WHEN DATA_OUT -at 0x0000017c : */ 0x18000000,0x00000510, +at 0x0000017a : */ 0x18000000,0x00000510, /* MOVE FROM dsa_dataout+0x00f0, WHEN DATA_OUT -at 0x0000017e : */ 0x18000000,0x00000518, +at 0x0000017c : */ 0x18000000,0x00000518, /* MOVE FROM dsa_dataout+0x00f8, WHEN DATA_OUT -at 0x00000180 : */ 0x18000000,0x00000520, +at 0x0000017e : */ 0x18000000,0x00000520, /* MOVE FROM dsa_dataout+0x0100, WHEN DATA_OUT -at 0x00000182 : */ 0x18000000,0x00000528, +at 0x00000180 : */ 0x18000000,0x00000528, /* MOVE FROM dsa_dataout+0x0108, WHEN DATA_OUT -at 0x00000184 : */ 0x18000000,0x00000530, +at 0x00000182 : */ 0x18000000,0x00000530, /* MOVE FROM dsa_dataout+0x0110, WHEN DATA_OUT -at 0x00000186 : */ 0x18000000,0x00000538, +at 0x00000184 : */ 0x18000000,0x00000538, /* MOVE FROM dsa_dataout+0x0118, WHEN DATA_OUT -at 0x00000188 : */ 0x18000000,0x00000540, +at 0x00000186 : */ 0x18000000,0x00000540, /* MOVE FROM dsa_dataout+0x0120, WHEN DATA_OUT -at 0x0000018a : */ 0x18000000,0x00000548, +at 0x00000188 : */ 0x18000000,0x00000548, /* MOVE FROM dsa_dataout+0x0128, WHEN DATA_OUT -at 0x0000018c : */ 0x18000000,0x00000550, +at 0x0000018a : */ 0x18000000,0x00000550, /* MOVE FROM dsa_dataout+0x0130, WHEN DATA_OUT -at 0x0000018e : */ 0x18000000,0x00000558, +at 0x0000018c : */ 0x18000000,0x00000558, /* MOVE FROM dsa_dataout+0x0138, WHEN DATA_OUT -at 0x00000190 : */ 0x18000000,0x00000560, +at 0x0000018e : */ 0x18000000,0x00000560, /* MOVE FROM dsa_dataout+0x0140, WHEN DATA_OUT -at 0x00000192 : */ 0x18000000,0x00000568, +at 0x00000190 : */ 0x18000000,0x00000568, /* MOVE FROM dsa_dataout+0x0148, WHEN DATA_OUT -at 0x00000194 : */ 0x18000000,0x00000570, +at 0x00000192 : */ 0x18000000,0x00000570, /* MOVE FROM dsa_dataout+0x0150, WHEN DATA_OUT -at 0x00000196 : */ 0x18000000,0x00000578, +at 0x00000194 : */ 0x18000000,0x00000578, /* MOVE FROM dsa_dataout+0x0158, WHEN DATA_OUT -at 0x00000198 : */ 0x18000000,0x00000580, +at 0x00000196 : */ 0x18000000,0x00000580, /* MOVE FROM dsa_dataout+0x0160, WHEN DATA_OUT -at 0x0000019a : */ 0x18000000,0x00000588, +at 0x00000198 : */ 0x18000000,0x00000588, /* MOVE FROM dsa_dataout+0x0168, WHEN DATA_OUT -at 0x0000019c : */ 0x18000000,0x00000590, +at 0x0000019a : */ 0x18000000,0x00000590, /* MOVE FROM dsa_dataout+0x0170, WHEN DATA_OUT -at 0x0000019e : */ 0x18000000,0x00000598, +at 0x0000019c : */ 0x18000000,0x00000598, /* MOVE FROM dsa_dataout+0x0178, WHEN DATA_OUT -at 0x000001a0 : */ 0x18000000,0x000005a0, +at 0x0000019e : */ 0x18000000,0x000005a0, /* MOVE FROM 
dsa_dataout+0x0180, WHEN DATA_OUT -at 0x000001a2 : */ 0x18000000,0x000005a8, +at 0x000001a0 : */ 0x18000000,0x000005a8, /* MOVE FROM dsa_dataout+0x0188, WHEN DATA_OUT -at 0x000001a4 : */ 0x18000000,0x000005b0, +at 0x000001a2 : */ 0x18000000,0x000005b0, /* MOVE FROM dsa_dataout+0x0190, WHEN DATA_OUT -at 0x000001a6 : */ 0x18000000,0x000005b8, +at 0x000001a4 : */ 0x18000000,0x000005b8, /* MOVE FROM dsa_dataout+0x0198, WHEN DATA_OUT -at 0x000001a8 : */ 0x18000000,0x000005c0, +at 0x000001a6 : */ 0x18000000,0x000005c0, /* MOVE FROM dsa_dataout+0x01a0, WHEN DATA_OUT -at 0x000001aa : */ 0x18000000,0x000005c8, +at 0x000001a8 : */ 0x18000000,0x000005c8, /* MOVE FROM dsa_dataout+0x01a8, WHEN DATA_OUT -at 0x000001ac : */ 0x18000000,0x000005d0, +at 0x000001aa : */ 0x18000000,0x000005d0, /* MOVE FROM dsa_dataout+0x01b0, WHEN DATA_OUT -at 0x000001ae : */ 0x18000000,0x000005d8, +at 0x000001ac : */ 0x18000000,0x000005d8, /* MOVE FROM dsa_dataout+0x01b8, WHEN DATA_OUT -at 0x000001b0 : */ 0x18000000,0x000005e0, +at 0x000001ae : */ 0x18000000,0x000005e0, /* MOVE FROM dsa_dataout+0x01c0, WHEN DATA_OUT -at 0x000001b2 : */ 0x18000000,0x000005e8, +at 0x000001b0 : */ 0x18000000,0x000005e8, /* MOVE FROM dsa_dataout+0x01c8, WHEN DATA_OUT -at 0x000001b4 : */ 0x18000000,0x000005f0, +at 0x000001b2 : */ 0x18000000,0x000005f0, /* MOVE FROM dsa_dataout+0x01d0, WHEN DATA_OUT -at 0x000001b6 : */ 0x18000000,0x000005f8, +at 0x000001b4 : */ 0x18000000,0x000005f8, /* MOVE FROM dsa_dataout+0x01d8, WHEN DATA_OUT -at 0x000001b8 : */ 0x18000000,0x00000600, +at 0x000001b6 : */ 0x18000000,0x00000600, /* MOVE FROM dsa_dataout+0x01e0, WHEN DATA_OUT -at 0x000001ba : */ 0x18000000,0x00000608, +at 0x000001b8 : */ 0x18000000,0x00000608, /* MOVE FROM dsa_dataout+0x01e8, WHEN DATA_OUT -at 0x000001bc : */ 0x18000000,0x00000610, +at 0x000001ba : */ 0x18000000,0x00000610, /* MOVE FROM dsa_dataout+0x01f0, WHEN DATA_OUT -at 0x000001be : */ 0x18000000,0x00000618, +at 0x000001bc : */ 0x18000000,0x00000618, /* MOVE FROM dsa_dataout+0x01f8, WHEN DATA_OUT -at 0x000001c0 : */ 0x18000000,0x00000620, +at 0x000001be : */ 0x18000000,0x00000620, /* MOVE FROM dsa_dataout+0x0200, WHEN DATA_OUT -at 0x000001c2 : */ 0x18000000,0x00000628, +at 0x000001c0 : */ 0x18000000,0x00000628, /* MOVE FROM dsa_dataout+0x0208, WHEN DATA_OUT -at 0x000001c4 : */ 0x18000000,0x00000630, +at 0x000001c2 : */ 0x18000000,0x00000630, /* MOVE FROM dsa_dataout+0x0210, WHEN DATA_OUT -at 0x000001c6 : */ 0x18000000,0x00000638, +at 0x000001c4 : */ 0x18000000,0x00000638, /* MOVE FROM dsa_dataout+0x0218, WHEN DATA_OUT -at 0x000001c8 : */ 0x18000000,0x00000640, +at 0x000001c6 : */ 0x18000000,0x00000640, /* MOVE FROM dsa_dataout+0x0220, WHEN DATA_OUT -at 0x000001ca : */ 0x18000000,0x00000648, +at 0x000001c8 : */ 0x18000000,0x00000648, /* MOVE FROM dsa_dataout+0x0228, WHEN DATA_OUT -at 0x000001cc : */ 0x18000000,0x00000650, +at 0x000001ca : */ 0x18000000,0x00000650, /* MOVE FROM dsa_dataout+0x0230, WHEN DATA_OUT -at 0x000001ce : */ 0x18000000,0x00000658, +at 0x000001cc : */ 0x18000000,0x00000658, /* MOVE FROM dsa_dataout+0x0238, WHEN DATA_OUT -at 0x000001d0 : */ 0x18000000,0x00000660, +at 0x000001ce : */ 0x18000000,0x00000660, /* MOVE FROM dsa_dataout+0x0240, WHEN DATA_OUT -at 0x000001d2 : */ 0x18000000,0x00000668, +at 0x000001d0 : */ 0x18000000,0x00000668, /* MOVE FROM dsa_dataout+0x0248, WHEN DATA_OUT -at 0x000001d4 : */ 0x18000000,0x00000670, +at 0x000001d2 : */ 0x18000000,0x00000670, /* MOVE FROM dsa_dataout+0x0250, WHEN DATA_OUT -at 0x000001d6 : */ 0x18000000,0x00000678, +at 0x000001d4 : */ 
0x18000000,0x00000678, /* MOVE FROM dsa_dataout+0x0258, WHEN DATA_OUT -at 0x000001d8 : */ 0x18000000,0x00000680, +at 0x000001d6 : */ 0x18000000,0x00000680, /* MOVE FROM dsa_dataout+0x0260, WHEN DATA_OUT -at 0x000001da : */ 0x18000000,0x00000688, +at 0x000001d8 : */ 0x18000000,0x00000688, /* MOVE FROM dsa_dataout+0x0268, WHEN DATA_OUT -at 0x000001dc : */ 0x18000000,0x00000690, +at 0x000001da : */ 0x18000000,0x00000690, /* MOVE FROM dsa_dataout+0x0270, WHEN DATA_OUT -at 0x000001de : */ 0x18000000,0x00000698, +at 0x000001dc : */ 0x18000000,0x00000698, /* MOVE FROM dsa_dataout+0x0278, WHEN DATA_OUT -at 0x000001e0 : */ 0x18000000,0x000006a0, +at 0x000001de : */ 0x18000000,0x000006a0, /* MOVE FROM dsa_dataout+0x0280, WHEN DATA_OUT -at 0x000001e2 : */ 0x18000000,0x000006a8, +at 0x000001e0 : */ 0x18000000,0x000006a8, /* MOVE FROM dsa_dataout+0x0288, WHEN DATA_OUT -at 0x000001e4 : */ 0x18000000,0x000006b0, +at 0x000001e2 : */ 0x18000000,0x000006b0, /* MOVE FROM dsa_dataout+0x0290, WHEN DATA_OUT -at 0x000001e6 : */ 0x18000000,0x000006b8, +at 0x000001e4 : */ 0x18000000,0x000006b8, /* MOVE FROM dsa_dataout+0x0298, WHEN DATA_OUT -at 0x000001e8 : */ 0x18000000,0x000006c0, +at 0x000001e6 : */ 0x18000000,0x000006c0, /* MOVE FROM dsa_dataout+0x02a0, WHEN DATA_OUT -at 0x000001ea : */ 0x18000000,0x000006c8, +at 0x000001e8 : */ 0x18000000,0x000006c8, /* MOVE FROM dsa_dataout+0x02a8, WHEN DATA_OUT -at 0x000001ec : */ 0x18000000,0x000006d0, +at 0x000001ea : */ 0x18000000,0x000006d0, /* MOVE FROM dsa_dataout+0x02b0, WHEN DATA_OUT -at 0x000001ee : */ 0x18000000,0x000006d8, +at 0x000001ec : */ 0x18000000,0x000006d8, /* MOVE FROM dsa_dataout+0x02b8, WHEN DATA_OUT -at 0x000001f0 : */ 0x18000000,0x000006e0, +at 0x000001ee : */ 0x18000000,0x000006e0, /* MOVE FROM dsa_dataout+0x02c0, WHEN DATA_OUT -at 0x000001f2 : */ 0x18000000,0x000006e8, +at 0x000001f0 : */ 0x18000000,0x000006e8, /* MOVE FROM dsa_dataout+0x02c8, WHEN DATA_OUT -at 0x000001f4 : */ 0x18000000,0x000006f0, +at 0x000001f2 : */ 0x18000000,0x000006f0, /* MOVE FROM dsa_dataout+0x02d0, WHEN DATA_OUT -at 0x000001f6 : */ 0x18000000,0x000006f8, +at 0x000001f4 : */ 0x18000000,0x000006f8, /* MOVE FROM dsa_dataout+0x02d8, WHEN DATA_OUT -at 0x000001f8 : */ 0x18000000,0x00000700, +at 0x000001f6 : */ 0x18000000,0x00000700, /* MOVE FROM dsa_dataout+0x02e0, WHEN DATA_OUT -at 0x000001fa : */ 0x18000000,0x00000708, +at 0x000001f8 : */ 0x18000000,0x00000708, /* MOVE FROM dsa_dataout+0x02e8, WHEN DATA_OUT -at 0x000001fc : */ 0x18000000,0x00000710, +at 0x000001fa : */ 0x18000000,0x00000710, /* MOVE FROM dsa_dataout+0x02f0, WHEN DATA_OUT -at 0x000001fe : */ 0x18000000,0x00000718, +at 0x000001fc : */ 0x18000000,0x00000718, /* MOVE FROM dsa_dataout+0x02f8, WHEN DATA_OUT -at 0x00000200 : */ 0x18000000,0x00000720, +at 0x000001fe : */ 0x18000000,0x00000720, /* MOVE FROM dsa_dataout+0x0300, WHEN DATA_OUT -at 0x00000202 : */ 0x18000000,0x00000728, +at 0x00000200 : */ 0x18000000,0x00000728, /* MOVE FROM dsa_dataout+0x0308, WHEN DATA_OUT -at 0x00000204 : */ 0x18000000,0x00000730, +at 0x00000202 : */ 0x18000000,0x00000730, /* MOVE FROM dsa_dataout+0x0310, WHEN DATA_OUT -at 0x00000206 : */ 0x18000000,0x00000738, +at 0x00000204 : */ 0x18000000,0x00000738, /* MOVE FROM dsa_dataout+0x0318, WHEN DATA_OUT -at 0x00000208 : */ 0x18000000,0x00000740, +at 0x00000206 : */ 0x18000000,0x00000740, /* MOVE FROM dsa_dataout+0x0320, WHEN DATA_OUT -at 0x0000020a : */ 0x18000000,0x00000748, +at 0x00000208 : */ 0x18000000,0x00000748, /* MOVE FROM dsa_dataout+0x0328, WHEN DATA_OUT -at 0x0000020c : */ 
0x18000000,0x00000750, +at 0x0000020a : */ 0x18000000,0x00000750, /* MOVE FROM dsa_dataout+0x0330, WHEN DATA_OUT -at 0x0000020e : */ 0x18000000,0x00000758, +at 0x0000020c : */ 0x18000000,0x00000758, /* MOVE FROM dsa_dataout+0x0338, WHEN DATA_OUT -at 0x00000210 : */ 0x18000000,0x00000760, +at 0x0000020e : */ 0x18000000,0x00000760, /* MOVE FROM dsa_dataout+0x0340, WHEN DATA_OUT -at 0x00000212 : */ 0x18000000,0x00000768, +at 0x00000210 : */ 0x18000000,0x00000768, /* MOVE FROM dsa_dataout+0x0348, WHEN DATA_OUT -at 0x00000214 : */ 0x18000000,0x00000770, +at 0x00000212 : */ 0x18000000,0x00000770, /* MOVE FROM dsa_dataout+0x0350, WHEN DATA_OUT -at 0x00000216 : */ 0x18000000,0x00000778, +at 0x00000214 : */ 0x18000000,0x00000778, /* MOVE FROM dsa_dataout+0x0358, WHEN DATA_OUT -at 0x00000218 : */ 0x18000000,0x00000780, +at 0x00000216 : */ 0x18000000,0x00000780, /* MOVE FROM dsa_dataout+0x0360, WHEN DATA_OUT -at 0x0000021a : */ 0x18000000,0x00000788, +at 0x00000218 : */ 0x18000000,0x00000788, /* MOVE FROM dsa_dataout+0x0368, WHEN DATA_OUT -at 0x0000021c : */ 0x18000000,0x00000790, +at 0x0000021a : */ 0x18000000,0x00000790, /* MOVE FROM dsa_dataout+0x0370, WHEN DATA_OUT -at 0x0000021e : */ 0x18000000,0x00000798, +at 0x0000021c : */ 0x18000000,0x00000798, /* MOVE FROM dsa_dataout+0x0378, WHEN DATA_OUT -at 0x00000220 : */ 0x18000000,0x000007a0, +at 0x0000021e : */ 0x18000000,0x000007a0, /* MOVE FROM dsa_dataout+0x0380, WHEN DATA_OUT -at 0x00000222 : */ 0x18000000,0x000007a8, +at 0x00000220 : */ 0x18000000,0x000007a8, /* MOVE FROM dsa_dataout+0x0388, WHEN DATA_OUT -at 0x00000224 : */ 0x18000000,0x000007b0, +at 0x00000222 : */ 0x18000000,0x000007b0, /* MOVE FROM dsa_dataout+0x0390, WHEN DATA_OUT -at 0x00000226 : */ 0x18000000,0x000007b8, +at 0x00000224 : */ 0x18000000,0x000007b8, /* MOVE FROM dsa_dataout+0x0398, WHEN DATA_OUT -at 0x00000228 : */ 0x18000000,0x000007c0, +at 0x00000226 : */ 0x18000000,0x000007c0, /* MOVE FROM dsa_dataout+0x03a0, WHEN DATA_OUT -at 0x0000022a : */ 0x18000000,0x000007c8, +at 0x00000228 : */ 0x18000000,0x000007c8, /* MOVE FROM dsa_dataout+0x03a8, WHEN DATA_OUT -at 0x0000022c : */ 0x18000000,0x000007d0, +at 0x0000022a : */ 0x18000000,0x000007d0, /* MOVE FROM dsa_dataout+0x03b0, WHEN DATA_OUT -at 0x0000022e : */ 0x18000000,0x000007d8, +at 0x0000022c : */ 0x18000000,0x000007d8, /* MOVE FROM dsa_dataout+0x03b8, WHEN DATA_OUT -at 0x00000230 : */ 0x18000000,0x000007e0, +at 0x0000022e : */ 0x18000000,0x000007e0, /* MOVE FROM dsa_dataout+0x03c0, WHEN DATA_OUT -at 0x00000232 : */ 0x18000000,0x000007e8, +at 0x00000230 : */ 0x18000000,0x000007e8, /* MOVE FROM dsa_dataout+0x03c8, WHEN DATA_OUT -at 0x00000234 : */ 0x18000000,0x000007f0, +at 0x00000232 : */ 0x18000000,0x000007f0, /* MOVE FROM dsa_dataout+0x03d0, WHEN DATA_OUT -at 0x00000236 : */ 0x18000000,0x000007f8, +at 0x00000234 : */ 0x18000000,0x000007f8, /* MOVE FROM dsa_dataout+0x03d8, WHEN DATA_OUT -at 0x00000238 : */ 0x18000000,0x00000800, +at 0x00000236 : */ 0x18000000,0x00000800, /* MOVE FROM dsa_dataout+0x03e0, WHEN DATA_OUT -at 0x0000023a : */ 0x18000000,0x00000808, +at 0x00000238 : */ 0x18000000,0x00000808, /* MOVE FROM dsa_dataout+0x03e8, WHEN DATA_OUT -at 0x0000023c : */ 0x18000000,0x00000810, +at 0x0000023a : */ 0x18000000,0x00000810, /* MOVE FROM dsa_dataout+0x03f0, WHEN DATA_OUT -at 0x0000023e : */ 0x18000000,0x00000818, +at 0x0000023c : */ 0x18000000,0x00000818, /* MOVE FROM dsa_dataout+0x03f8, WHEN DATA_OUT -at 0x00000240 : */ 0x18000000,0x00000820, +at 0x0000023e : */ 0x18000000,0x00000820, /* ENTRY end_data_trans 
end_data_trans: redo_msgin3: JUMP get_status, WHEN STATUS -at 0x00000242 : */ 0x830b0000,0x000000a0, +at 0x00000240 : */ 0x830b0000,0x00000098, /* JUMP get_msgin3, WHEN MSG_IN -at 0x00000244 : */ 0x870b0000,0x00000b20, +at 0x00000242 : */ 0x870b0000,0x00000b78, /* INT int_data_bad_phase -at 0x00000246 : */ 0x98080000,0xab93000b, +at 0x00000244 : */ 0x98080000,0xab93000b, /* get_msgin1: MOVE SCRATCH0 | had_msgin TO SCRATCH0 -at 0x00000248 : */ 0x7a344000,0x00000000, +at 0x00000246 : */ 0x7a344000,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x0000024a : */ 0x0f000001,0x00000000, +at 0x00000248 : */ 0x0f000001,0x00000000, /* JUMP ext_msg1, IF 0x01 ; Extended Message -at 0x0000024c : */ 0x800c0001,0x00000968, +at 0x0000024a : */ 0x800c0001,0x00000960, /* JUMP ignore_msg1, IF 0x02 ; Save Data Pointers -at 0x0000024e : */ 0x800c0002,0x00000958, +at 0x0000024c : */ 0x800c0002,0x00000950, /* JUMP ignore_msg1, IF 0x03 ; Save Restore Pointers -at 0x00000250 : */ 0x800c0003,0x00000958, +at 0x0000024e : */ 0x800c0003,0x00000950, /* JUMP disc1, IF 0x04 ; Disconnect -at 0x00000252 : */ 0x800c0004,0x000009c8, +at 0x00000250 : */ 0x800c0004,0x000009f0, /* INT int_bad_msg1 -at 0x00000254 : */ 0x98080000,0xab930006, +at 0x00000252 : */ 0x98080000,0xab930006, /* ignore_msg1: CLEAR ACK -at 0x00000256 : */ 0x60000040,0x00000000, +at 0x00000254 : */ 0x60000040,0x00000000, /* JUMP redo_msgin1 -at 0x00000258 : */ 0x80080000,0x00000058, +at 0x00000256 : */ 0x80080000,0x00000050, /* ext_msg1: MOVE SCRATCH0 | had_extmsg TO SCRATCH0 -at 0x0000025a : */ 0x7a348000,0x00000000, +at 0x00000258 : */ 0x7a348000,0x00000000, /* CLEAR ACK -at 0x0000025c : */ 0x60000040,0x00000000, +at 0x0000025a : */ 0x60000040,0x00000000, /* MOVE 1, msgin_buf + 1, WHEN MSG_IN -at 0x0000025e : */ 0x0f000001,0x00000001, +at 0x0000025c : */ 0x0f000001,0x00000001, /* - JUMP ext_msg1a, IF 0x03 + JUMP reject_msg1, IF NOT 0x03 ; Only handle SDTR -at 0x00000260 : */ 0x800c0003,0x00000990, +at 0x0000025e : */ 0x80040003,0x000009b0, /* - INT int_bad_extmsg1a + CLEAR ACK -at 0x00000262 : */ 0x98080000,0xab930000, +at 0x00000260 : */ 0x60000040,0x00000000, +/* + MOVE 1, msgin_buf + 2, WHEN MSG_IN + +at 0x00000262 : */ 0x0f000001,0x00000002, +/* + JUMP reject_msg1, IF NOT 0x01 ; Only handle SDTR + +at 0x00000264 : */ 0x80040001,0x000009b0, /* -ext_msg1a: CLEAR ACK -at 0x00000264 : */ 0x60000040,0x00000000, +at 0x00000266 : */ 0x60000040,0x00000000, /* - MOVE 1, msgin_buf + 2, WHEN MSG_IN + MOVE 2, msgin_buf + 3, WHEN MSG_IN -at 0x00000266 : */ 0x0f000001,0x00000002, +at 0x00000268 : */ 0x0f000002,0x00000003, /* - JUMP ext_msg1b, IF 0x01 ; Must be SDTR + INT int_msg_sdtr1 + +at 0x0000026a : */ 0x98080000,0xab93000c, +/* +reject_msg1: + MOVE SCRATCH1 | did_reject TO SCRATCH1 -at 0x00000268 : */ 0x800c0001,0x000009b0, +at 0x0000026c : */ 0x7a350100,0x00000000, /* - INT int_bad_extmsg1b + SET ATN -at 0x0000026a : */ 0x98080000,0xab930001, +at 0x0000026e : */ 0x58000008,0x00000000, /* -ext_msg1b: CLEAR ACK -at 0x0000026c : */ 0x60000040,0x00000000, +at 0x00000270 : */ 0x60000040,0x00000000, /* - MOVE 2, msgin_buf + 3, WHEN MSG_IN + JUMP reject_msg1a, WHEN NOT MSG_IN -at 0x0000026e : */ 0x0f000002,0x00000003, +at 0x00000272 : */ 0x87030000,0x000009e0, /* - INT int_msg_sdtr1 + MOVE 1, msgin_buf + 7, WHEN MSG_IN -at 0x00000270 : */ 0x98080000,0xab93000c, +at 0x00000274 : */ 0x0f000001,0x00000007, +/* + JUMP reject_msg1 + +at 0x00000276 : */ 0x80080000,0x000009b0, +/* +reject_msg1a: + MOVE 1, msg_reject, WHEN MSG_OUT + +at 0x00000278 : */ 
0x0e000001,0x00000000, +/* + JUMP redo_msgin1 + +at 0x0000027a : */ 0x80080000,0x00000050, /* disc1: CLEAR ACK -at 0x00000272 : */ 0x60000040,0x00000000, +at 0x0000027c : */ 0x60000040,0x00000000, /* ENTRY wait_disc1 wait_disc1: WAIT DISCONNECT -at 0x00000274 : */ 0x48000000,0x00000000, +at 0x0000027e : */ 0x48000000,0x00000000, /* INT int_disc1 -at 0x00000276 : */ 0x98080000,0xab930019, +at 0x00000280 : */ 0x98080000,0xab930019, /* ENTRY resume_msgin1a resume_msgin1a: CLEAR ACK -at 0x00000278 : */ 0x60000040,0x00000000, +at 0x00000282 : */ 0x60000040,0x00000000, /* JUMP redo_msgin1 -at 0x0000027a : */ 0x80080000,0x00000058, +at 0x00000284 : */ 0x80080000,0x00000050, /* ENTRY resume_msgin1b resume_msgin1b: SET ATN -at 0x0000027c : */ 0x58000008,0x00000000, +at 0x00000286 : */ 0x58000008,0x00000000, /* CLEAR ACK -at 0x0000027e : */ 0x60000040,0x00000000, +at 0x00000288 : */ 0x60000040,0x00000000, /* INT int_no_msgout1, WHEN NOT MSG_OUT -at 0x00000280 : */ 0x9e030000,0xab93000f, +at 0x0000028a : */ 0x9e030000,0xab93000f, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x00000282 : */ 0x7a340200,0x00000000, +at 0x0000028c : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x00000284 : */ 0x1e000000,0x00000008, +at 0x0000028e : */ 0x1e000000,0x00000008, /* JUMP redo_msgin1 -at 0x00000286 : */ 0x80080000,0x00000058, +at 0x00000290 : */ 0x80080000,0x00000050, /* get_msgin2: MOVE SCRATCH0 | had_msgin TO SCRATCH0 -at 0x00000288 : */ 0x7a344000,0x00000000, +at 0x00000292 : */ 0x7a344000,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x0000028a : */ 0x0f000001,0x00000000, +at 0x00000294 : */ 0x0f000001,0x00000000, /* JUMP ext_msg2, IF 0x01 ; Extended Message -at 0x0000028c : */ 0x800c0001,0x00000a68, +at 0x00000296 : */ 0x800c0001,0x00000a90, /* JUMP ignore_msg2, IF 0x02 ; Save Data Pointers -at 0x0000028e : */ 0x800c0002,0x00000a58, +at 0x00000298 : */ 0x800c0002,0x00000a80, /* JUMP ignore_msg2, IF 0x03 ; Save Restore Pointers -at 0x00000290 : */ 0x800c0003,0x00000a58, +at 0x0000029a : */ 0x800c0003,0x00000a80, /* JUMP disc2, IF 0x04 ; Disconnect -at 0x00000292 : */ 0x800c0004,0x00000ac8, +at 0x0000029c : */ 0x800c0004,0x00000b20, /* INT int_bad_msg2 -at 0x00000294 : */ 0x98080000,0xab930007, +at 0x0000029e : */ 0x98080000,0xab930007, /* ignore_msg2: CLEAR ACK -at 0x00000296 : */ 0x60000040,0x00000000, +at 0x000002a0 : */ 0x60000040,0x00000000, /* JUMP redo_msgin2 -at 0x00000298 : */ 0x80080000,0x00000078, +at 0x000002a2 : */ 0x80080000,0x00000070, /* ext_msg2: MOVE SCRATCH0 | had_extmsg TO SCRATCH0 -at 0x0000029a : */ 0x7a348000,0x00000000, +at 0x000002a4 : */ 0x7a348000,0x00000000, /* CLEAR ACK -at 0x0000029c : */ 0x60000040,0x00000000, +at 0x000002a6 : */ 0x60000040,0x00000000, /* MOVE 1, msgin_buf + 1, WHEN MSG_IN -at 0x0000029e : */ 0x0f000001,0x00000001, +at 0x000002a8 : */ 0x0f000001,0x00000001, +/* + JUMP reject_msg2, IF NOT 0x03 ; Only handle SDTR + +at 0x000002aa : */ 0x80040003,0x00000ae0, +/* + CLEAR ACK + +at 0x000002ac : */ 0x60000040,0x00000000, /* - JUMP ext_msg2a, IF 0x03 + MOVE 1, msgin_buf + 2, WHEN MSG_IN -at 0x000002a0 : */ 0x800c0003,0x00000a90, +at 0x000002ae : */ 0x0f000001,0x00000002, /* - INT int_bad_extmsg2a + JUMP reject_msg2, IF NOT 0x01 ; Only handle SDTR -at 0x000002a2 : */ 0x98080000,0xab930002, +at 0x000002b0 : */ 0x80040001,0x00000ae0, /* -ext_msg2a: CLEAR ACK -at 0x000002a4 : */ 0x60000040,0x00000000, +at 0x000002b2 : */ 0x60000040,0x00000000, /* - MOVE 1, msgin_buf + 2, WHEN MSG_IN + MOVE 2, msgin_buf + 3, WHEN MSG_IN + +at 0x000002b4 : */ 
0x0f000002,0x00000003, +/* + INT int_msg_sdtr2 -at 0x000002a6 : */ 0x0f000001,0x00000002, +at 0x000002b6 : */ 0x98080000,0xab93000d, /* - JUMP ext_msg2b, IF 0x01 ; Must be SDTR +reject_msg2: + MOVE SCRATCH1 | did_reject TO SCRATCH1 -at 0x000002a8 : */ 0x800c0001,0x00000ab0, +at 0x000002b8 : */ 0x7a350100,0x00000000, /* - INT int_bad_extmsg2b + SET ATN -at 0x000002aa : */ 0x98080000,0xab930003, +at 0x000002ba : */ 0x58000008,0x00000000, /* -ext_msg2b: CLEAR ACK -at 0x000002ac : */ 0x60000040,0x00000000, +at 0x000002bc : */ 0x60000040,0x00000000, /* - MOVE 2, msgin_buf + 3, WHEN MSG_IN + JUMP reject_msg2a, WHEN NOT MSG_IN -at 0x000002ae : */ 0x0f000002,0x00000003, +at 0x000002be : */ 0x87030000,0x00000b10, /* - INT int_msg_sdtr2 + MOVE 1, msgin_buf + 7, WHEN MSG_IN + +at 0x000002c0 : */ 0x0f000001,0x00000007, +/* + JUMP reject_msg2 + +at 0x000002c2 : */ 0x80080000,0x00000ae0, +/* +reject_msg2a: + MOVE 1, msg_reject, WHEN MSG_OUT + +at 0x000002c4 : */ 0x0e000001,0x00000000, +/* + JUMP redo_msgin2 -at 0x000002b0 : */ 0x98080000,0xab93000d, +at 0x000002c6 : */ 0x80080000,0x00000070, /* disc2: CLEAR ACK -at 0x000002b2 : */ 0x60000040,0x00000000, +at 0x000002c8 : */ 0x60000040,0x00000000, /* ENTRY wait_disc2 wait_disc2: WAIT DISCONNECT -at 0x000002b4 : */ 0x48000000,0x00000000, +at 0x000002ca : */ 0x48000000,0x00000000, /* INT int_disc2 -at 0x000002b6 : */ 0x98080000,0xab93001a, +at 0x000002cc : */ 0x98080000,0xab93001a, /* ENTRY resume_msgin2a resume_msgin2a: CLEAR ACK -at 0x000002b8 : */ 0x60000040,0x00000000, +at 0x000002ce : */ 0x60000040,0x00000000, /* JUMP redo_msgin2 -at 0x000002ba : */ 0x80080000,0x00000078, +at 0x000002d0 : */ 0x80080000,0x00000070, /* ENTRY resume_msgin2b resume_msgin2b: SET ATN -at 0x000002bc : */ 0x58000008,0x00000000, +at 0x000002d2 : */ 0x58000008,0x00000000, /* CLEAR ACK -at 0x000002be : */ 0x60000040,0x00000000, +at 0x000002d4 : */ 0x60000040,0x00000000, /* INT int_no_msgout2, WHEN NOT MSG_OUT -at 0x000002c0 : */ 0x9e030000,0xab930010, +at 0x000002d6 : */ 0x9e030000,0xab930010, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x000002c2 : */ 0x7a340200,0x00000000, +at 0x000002d8 : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x000002c4 : */ 0x1e000000,0x00000008, +at 0x000002da : */ 0x1e000000,0x00000008, /* JUMP redo_msgin2 -at 0x000002c6 : */ 0x80080000,0x00000078, +at 0x000002dc : */ 0x80080000,0x00000070, /* get_msgin3: MOVE SCRATCH0 | had_msgin TO SCRATCH0 -at 0x000002c8 : */ 0x7a344000,0x00000000, +at 0x000002de : */ 0x7a344000,0x00000000, /* MOVE 1, msgin_buf, WHEN MSG_IN -at 0x000002ca : */ 0x0f000001,0x00000000, +at 0x000002e0 : */ 0x0f000001,0x00000000, /* JUMP ext_msg3, IF 0x01 ; Extended Message -at 0x000002cc : */ 0x800c0001,0x00000b68, +at 0x000002e2 : */ 0x800c0001,0x00000bc0, /* JUMP ignore_msg3, IF 0x02 ; Save Data Pointers -at 0x000002ce : */ 0x800c0002,0x00000b58, +at 0x000002e4 : */ 0x800c0002,0x00000bb0, /* JUMP ignore_msg3, IF 0x03 ; Save Restore Pointers -at 0x000002d0 : */ 0x800c0003,0x00000b58, +at 0x000002e6 : */ 0x800c0003,0x00000bb0, /* JUMP disc3, IF 0x04 ; Disconnect -at 0x000002d2 : */ 0x800c0004,0x00000bc8, +at 0x000002e8 : */ 0x800c0004,0x00000c50, /* INT int_bad_msg3 -at 0x000002d4 : */ 0x98080000,0xab930008, +at 0x000002ea : */ 0x98080000,0xab930008, /* ignore_msg3: CLEAR ACK -at 0x000002d6 : */ 0x60000040,0x00000000, +at 0x000002ec : */ 0x60000040,0x00000000, /* JUMP redo_msgin3 -at 0x000002d8 : */ 0x80080000,0x00000908, +at 0x000002ee : */ 0x80080000,0x00000900, /* ext_msg3: MOVE SCRATCH0 | had_extmsg TO 
SCRATCH0 -at 0x000002da : */ 0x7a348000,0x00000000, +at 0x000002f0 : */ 0x7a348000,0x00000000, /* CLEAR ACK -at 0x000002dc : */ 0x60000040,0x00000000, +at 0x000002f2 : */ 0x60000040,0x00000000, /* MOVE 1, msgin_buf + 1, WHEN MSG_IN -at 0x000002de : */ 0x0f000001,0x00000001, +at 0x000002f4 : */ 0x0f000001,0x00000001, /* - JUMP ext_msg3a, IF 0x03 + JUMP reject_msg3, IF NOT 0x03 ; Only handle SDTR -at 0x000002e0 : */ 0x800c0003,0x00000b90, +at 0x000002f6 : */ 0x80040003,0x00000c10, /* - INT int_bad_extmsg3a + CLEAR ACK -at 0x000002e2 : */ 0x98080000,0xab930004, +at 0x000002f8 : */ 0x60000040,0x00000000, +/* + MOVE 1, msgin_buf + 2, WHEN MSG_IN + +at 0x000002fa : */ 0x0f000001,0x00000002, +/* + JUMP reject_msg3, IF NOT 0x01 ; Only handle SDTR + +at 0x000002fc : */ 0x80040001,0x00000c10, /* -ext_msg3a: CLEAR ACK -at 0x000002e4 : */ 0x60000040,0x00000000, +at 0x000002fe : */ 0x60000040,0x00000000, /* - MOVE 1, msgin_buf + 2, WHEN MSG_IN + MOVE 2, msgin_buf + 3, WHEN MSG_IN + +at 0x00000300 : */ 0x0f000002,0x00000003, +/* + INT int_msg_sdtr3 -at 0x000002e6 : */ 0x0f000001,0x00000002, +at 0x00000302 : */ 0x98080000,0xab93000e, /* - JUMP ext_msg3b, IF 0x01 ; Must be SDTR +reject_msg3: + MOVE SCRATCH1 | did_reject TO SCRATCH1 -at 0x000002e8 : */ 0x800c0001,0x00000bb0, +at 0x00000304 : */ 0x7a350100,0x00000000, /* - INT int_bad_extmsg3b + SET ATN -at 0x000002ea : */ 0x98080000,0xab930005, +at 0x00000306 : */ 0x58000008,0x00000000, /* -ext_msg3b: CLEAR ACK -at 0x000002ec : */ 0x60000040,0x00000000, +at 0x00000308 : */ 0x60000040,0x00000000, /* - MOVE 2, msgin_buf + 3, WHEN MSG_IN + JUMP reject_msg3a, WHEN NOT MSG_IN -at 0x000002ee : */ 0x0f000002,0x00000003, +at 0x0000030a : */ 0x87030000,0x00000c40, /* - INT int_msg_sdtr3 + MOVE 1, msgin_buf + 7, WHEN MSG_IN + +at 0x0000030c : */ 0x0f000001,0x00000007, +/* + JUMP reject_msg3 + +at 0x0000030e : */ 0x80080000,0x00000c10, +/* +reject_msg3a: + MOVE 1, msg_reject, WHEN MSG_OUT -at 0x000002f0 : */ 0x98080000,0xab93000e, +at 0x00000310 : */ 0x0e000001,0x00000000, +/* + JUMP redo_msgin3 + +at 0x00000312 : */ 0x80080000,0x00000900, /* disc3: CLEAR ACK -at 0x000002f2 : */ 0x60000040,0x00000000, +at 0x00000314 : */ 0x60000040,0x00000000, /* ENTRY wait_disc3 wait_disc3: WAIT DISCONNECT -at 0x000002f4 : */ 0x48000000,0x00000000, +at 0x00000316 : */ 0x48000000,0x00000000, /* INT int_disc3 -at 0x000002f6 : */ 0x98080000,0xab93001b, +at 0x00000318 : */ 0x98080000,0xab93001b, /* ENTRY resume_msgin3a resume_msgin3a: CLEAR ACK -at 0x000002f8 : */ 0x60000040,0x00000000, +at 0x0000031a : */ 0x60000040,0x00000000, /* JUMP redo_msgin3 -at 0x000002fa : */ 0x80080000,0x00000908, +at 0x0000031c : */ 0x80080000,0x00000900, /* ENTRY resume_msgin3b resume_msgin3b: SET ATN -at 0x000002fc : */ 0x58000008,0x00000000, +at 0x0000031e : */ 0x58000008,0x00000000, /* CLEAR ACK -at 0x000002fe : */ 0x60000040,0x00000000, +at 0x00000320 : */ 0x60000040,0x00000000, /* INT int_no_msgout3, WHEN NOT MSG_OUT -at 0x00000300 : */ 0x9e030000,0xab930011, +at 0x00000322 : */ 0x9e030000,0xab930011, /* MOVE SCRATCH0 | had_msgout TO SCRATCH0 -at 0x00000302 : */ 0x7a340200,0x00000000, +at 0x00000324 : */ 0x7a340200,0x00000000, /* MOVE FROM dsa_msgout, when MSG_OUT -at 0x00000304 : */ 0x1e000000,0x00000008, +at 0x00000326 : */ 0x1e000000,0x00000008, /* JUMP redo_msgin3 -at 0x00000306 : */ 0x80080000,0x00000908, +at 0x00000328 : */ 0x80080000,0x00000900, /* ENTRY resume_rej_ident resume_rej_ident: CLEAR ATN -at 0x00000308 : */ 0x60000008,0x00000000, +at 0x0000032a : */ 0x60000008,0x00000000, /* MOVE 1, 
msgin_buf, WHEN MSG_IN -at 0x0000030a : */ 0x0f000001,0x00000000, +at 0x0000032c : */ 0x0f000001,0x00000000, /* INT int_not_rej, IF NOT 0x07 ; Reject -at 0x0000030c : */ 0x98040007,0xab93001c, +at 0x0000032e : */ 0x98040007,0xab93001c, /* CLEAR ACK -at 0x0000030e : */ 0x60000040,0x00000000, +at 0x00000330 : */ 0x60000040,0x00000000, /* JUMP done_ident -at 0x00000310 : */ 0x80080000,0x00000050, +at 0x00000332 : */ 0x80080000,0x00000048, /* ENTRY reselect @@ -1716,73 +1784,92 @@ ; Disable selection timer MOVE CTEST7 | 0x10 TO CTEST7 -at 0x00000312 : */ 0x7a1b1000,0x00000000, +at 0x00000334 : */ 0x7a1b1000,0x00000000, /* WAIT RESELECT resel_err -at 0x00000314 : */ 0x50000000,0x00000c70, +at 0x00000336 : */ 0x50000000,0x00000cf8, /* INT int_resel_not_msgin, WHEN NOT MSG_IN -at 0x00000316 : */ 0x9f030000,0xab930016, +at 0x00000338 : */ 0x9f030000,0xab930016, /* MOVE 1, reselected_identify, WHEN MSG_IN -at 0x00000318 : */ 0x0f000001,0x00000000, +at 0x0000033a : */ 0x0f000001,0x00000000, /* INT int_reselected -at 0x0000031a : */ 0x98080000,0xab930017, +at 0x0000033c : */ 0x98080000,0xab930017, /* resel_err: MOVE CTEST2 & 0x40 TO SFBR -at 0x0000031c : */ 0x74164000,0x00000000, +at 0x0000033e : */ 0x74164000,0x00000000, /* JUMP selected, IF 0x00 -at 0x0000031e : */ 0x800c0000,0x00000cb0, +at 0x00000340 : */ 0x800c0000,0x00000d38, /* MOVE SFBR & 0 TO SFBR -at 0x00000320 : */ 0x7c080000,0x00000000, +at 0x00000342 : */ 0x7c080000,0x00000000, /* ENTRY patch_new_dsa patch_new_dsa: MOVE SFBR | 0x11 TO DSA0 -at 0x00000322 : */ 0x6a101100,0x00000000, +at 0x00000344 : */ 0x6a101100,0x00000000, /* MOVE SFBR | 0x22 TO DSA1 -at 0x00000324 : */ 0x6a112200,0x00000000, +at 0x00000346 : */ 0x6a112200,0x00000000, /* MOVE SFBR | 0x33 TO DSA2 -at 0x00000326 : */ 0x6a123300,0x00000000, +at 0x00000348 : */ 0x6a123300,0x00000000, /* MOVE SFBR | 0x44 TO DSA3 -at 0x00000328 : */ 0x6a134400,0x00000000, +at 0x0000034a : */ 0x6a134400,0x00000000, /* JUMP do_select -at 0x0000032a : */ 0x80080000,0x00000000, +at 0x0000034c : */ 0x80080000,0x00000000, /* selected: INT int_selected -at 0x0000032c : */ 0x98080000,0xab930018, +at 0x0000034e : */ 0x98080000,0xab930018, +/* + +ENTRY test1 +test1: + MOVE MEMORY 4, test1_src, test1_dst + +at 0x00000350 : */ 0xc0000004,0x00000000,0x00000000, +/* + INT int_test1 + +at 0x00000353 : */ 0x98080000,0xab93001d, +}; + +#define A_did_reject 0x00000001 +static u32 A_did_reject_used[] __attribute((unused)) = { + 0x0000026c, + 0x000002b8, + 0x00000304, }; #define A_dsa_cmnd 0x00000010 static u32 A_dsa_cmnd_used[] __attribute((unused)) = { - 0x0000001d, + 0x0000001b, }; #define A_dsa_datain 0x00000028 static u32 A_dsa_datain_used[] __attribute((unused)) = { + 0x0000003b, 0x0000003d, 0x0000003f, 0x00000041, @@ -1910,11 +1997,11 @@ 0x00000135, 0x00000137, 0x00000139, - 0x0000013b, }; #define A_dsa_dataout 0x00000428 static u32 A_dsa_dataout_used[] __attribute((unused)) = { + 0x00000141, 0x00000143, 0x00000145, 0x00000147, @@ -2042,25 +2129,24 @@ 0x0000023b, 0x0000023d, 0x0000023f, - 0x00000241, }; #define A_dsa_msgin 0x00000020 static u32 A_dsa_msgin_used[] __attribute((unused)) = { - 0x0000002f, + 0x0000002d, }; #define A_dsa_msgout 0x00000008 static u32 A_dsa_msgout_used[] __attribute((unused)) = { - 0x00000013, - 0x00000285, - 0x000002c5, - 0x00000305, + 0x00000011, + 0x0000028f, + 0x000002db, + 0x00000327, }; #define A_dsa_select 0x00000000 static u32 A_dsa_select_used[] __attribute((unused)) = { - 0x00000006, + 0x00000004, }; #define A_dsa_size 0x00000828 @@ -2069,285 +2155,290 @@ #define 
A_dsa_status 0x00000018 static u32 A_dsa_status_used[] __attribute((unused)) = { - 0x0000002b, + 0x00000029, }; #define A_had_cmdout 0x00000004 static u32 A_had_cmdout_used[] __attribute((unused)) = { - 0x0000001a, + 0x00000018, }; #define A_had_datain 0x00000008 static u32 A_had_datain_used[] __attribute((unused)) = { - 0x00000038, + 0x00000036, }; #define A_had_dataout 0x00000010 static u32 A_had_dataout_used[] __attribute((unused)) = { - 0x0000013e, + 0x0000013c, }; #define A_had_extmsg 0x00000080 static u32 A_had_extmsg_used[] __attribute((unused)) = { - 0x0000025a, - 0x0000029a, - 0x000002da, + 0x00000258, + 0x000002a4, + 0x000002f0, }; #define A_had_msgin 0x00000040 static u32 A_had_msgin_used[] __attribute((unused)) = { - 0x00000248, - 0x00000288, - 0x000002c8, + 0x00000246, + 0x00000292, + 0x000002de, }; #define A_had_msgout 0x00000002 static u32 A_had_msgout_used[] __attribute((unused)) = { - 0x00000010, - 0x00000282, - 0x000002c2, - 0x00000302, + 0x0000000e, + 0x0000028c, + 0x000002d8, + 0x00000324, }; #define A_had_select 0x00000001 static u32 A_had_select_used[] __attribute((unused)) = { - 0x0000000c, + 0x0000000a, }; #define A_had_status 0x00000020 static u32 A_had_status_used[] __attribute((unused)) = { }; -#define A_int_bad_extmsg1a 0xab930000 -static u32 A_int_bad_extmsg1a_used[] __attribute((unused)) = { - 0x00000263, -}; - -#define A_int_bad_extmsg1b 0xab930001 -static u32 A_int_bad_extmsg1b_used[] __attribute((unused)) = { - 0x0000026b, -}; - -#define A_int_bad_extmsg2a 0xab930002 -static u32 A_int_bad_extmsg2a_used[] __attribute((unused)) = { - 0x000002a3, -}; - -#define A_int_bad_extmsg2b 0xab930003 -static u32 A_int_bad_extmsg2b_used[] __attribute((unused)) = { - 0x000002ab, -}; - -#define A_int_bad_extmsg3a 0xab930004 -static u32 A_int_bad_extmsg3a_used[] __attribute((unused)) = { - 0x000002e3, -}; - -#define A_int_bad_extmsg3b 0xab930005 -static u32 A_int_bad_extmsg3b_used[] __attribute((unused)) = { - 0x000002eb, -}; - #define A_int_bad_msg1 0xab930006 static u32 A_int_bad_msg1_used[] __attribute((unused)) = { - 0x00000255, + 0x00000253, }; #define A_int_bad_msg2 0xab930007 static u32 A_int_bad_msg2_used[] __attribute((unused)) = { - 0x00000295, + 0x0000029f, }; #define A_int_bad_msg3 0xab930008 static u32 A_int_bad_msg3_used[] __attribute((unused)) = { - 0x000002d5, + 0x000002eb, }; #define A_int_cmd_bad_phase 0xab930009 static u32 A_int_cmd_bad_phase_used[] __attribute((unused)) = { - 0x00000027, + 0x00000025, }; #define A_int_cmd_complete 0xab93000a static u32 A_int_cmd_complete_used[] __attribute((unused)) = { - 0x00000037, + 0x00000035, }; #define A_int_data_bad_phase 0xab93000b static u32 A_int_data_bad_phase_used[] __attribute((unused)) = { - 0x00000247, + 0x00000245, }; #define A_int_disc1 0xab930019 static u32 A_int_disc1_used[] __attribute((unused)) = { - 0x00000277, + 0x00000281, }; #define A_int_disc2 0xab93001a static u32 A_int_disc2_used[] __attribute((unused)) = { - 0x000002b7, + 0x000002cd, }; #define A_int_disc3 0xab93001b static u32 A_int_disc3_used[] __attribute((unused)) = { - 0x000002f7, + 0x00000319, }; #define A_int_msg_sdtr1 0xab93000c static u32 A_int_msg_sdtr1_used[] __attribute((unused)) = { - 0x00000271, + 0x0000026b, }; #define A_int_msg_sdtr2 0xab93000d static u32 A_int_msg_sdtr2_used[] __attribute((unused)) = { - 0x000002b1, + 0x000002b7, }; #define A_int_msg_sdtr3 0xab93000e static u32 A_int_msg_sdtr3_used[] __attribute((unused)) = { - 0x000002f1, + 0x00000303, }; #define A_int_no_msgout1 0xab93000f static u32 A_int_no_msgout1_used[] 
__attribute((unused)) = { - 0x00000281, + 0x0000028b, }; #define A_int_no_msgout2 0xab930010 static u32 A_int_no_msgout2_used[] __attribute((unused)) = { - 0x000002c1, + 0x000002d7, }; #define A_int_no_msgout3 0xab930011 static u32 A_int_no_msgout3_used[] __attribute((unused)) = { - 0x00000301, + 0x00000323, }; #define A_int_not_cmd_complete 0xab930012 static u32 A_int_not_cmd_complete_used[] __attribute((unused)) = { - 0x00000031, + 0x0000002f, }; #define A_int_not_rej 0xab93001c static u32 A_int_not_rej_used[] __attribute((unused)) = { - 0x0000030d, + 0x0000032f, }; #define A_int_resel_not_msgin 0xab930016 static u32 A_int_resel_not_msgin_used[] __attribute((unused)) = { - 0x00000317, + 0x00000339, }; #define A_int_reselected 0xab930017 static u32 A_int_reselected_used[] __attribute((unused)) = { - 0x0000031b, + 0x0000033d, }; #define A_int_sel_no_ident 0xab930013 static u32 A_int_sel_no_ident_used[] __attribute((unused)) = { - 0x0000000f, + 0x0000000d, }; #define A_int_sel_not_cmd 0xab930014 static u32 A_int_sel_not_cmd_used[] __attribute((unused)) = { - 0x00000019, + 0x00000017, }; #define A_int_selected 0xab930018 static u32 A_int_selected_used[] __attribute((unused)) = { - 0x0000032d, + 0x0000034f, }; #define A_int_status_not_msgin 0xab930015 static u32 A_int_status_not_msgin_used[] __attribute((unused)) = { - 0x0000002d, + 0x0000002b, +}; + +#define A_int_test1 0xab93001d +static u32 A_int_test1_used[] __attribute((unused)) = { + 0x00000354, +}; + +#define A_msg_reject 0x00000000 +static u32 A_msg_reject_used[] __attribute((unused)) = { + 0x00000279, + 0x000002c5, + 0x00000311, }; #define A_msgin_buf 0x00000000 static u32 A_msgin_buf_used[] __attribute((unused)) = { - 0x0000024b, - 0x0000025f, - 0x00000267, - 0x0000026f, - 0x0000028b, - 0x0000029f, - 0x000002a7, + 0x00000249, + 0x0000025d, + 0x00000263, + 0x00000269, + 0x00000275, + 0x00000295, + 0x000002a9, 0x000002af, - 0x000002cb, - 0x000002df, - 0x000002e7, - 0x000002ef, - 0x0000030b, + 0x000002b5, + 0x000002c1, + 0x000002e1, + 0x000002f5, + 0x000002fb, + 0x00000301, + 0x0000030d, + 0x0000032d, }; #define A_reselected_identify 0x00000000 static u32 A_reselected_identify_used[] __attribute((unused)) = { - 0x00000319, + 0x0000033b, +}; + +#define A_test1_dst 0x00000000 +static u32 A_test1_dst_used[] __attribute((unused)) = { + 0x00000352, +}; + +#define A_test1_src 0x00000000 +static u32 A_test1_src_used[] __attribute((unused)) = { + 0x00000351, }; #define Ent_do_select 0x00000000 -#define Ent_done_ident 0x00000050 -#define Ent_end_data_trans 0x00000908 -#define Ent_patch_input_data 0x000000e8 -#define Ent_patch_new_dsa 0x00000c88 -#define Ent_patch_output_data 0x00000500 -#define Ent_reselect 0x00000c48 -#define Ent_resume_cmd 0x00000068 -#define Ent_resume_msgin1a 0x000009e0 -#define Ent_resume_msgin1b 0x000009f0 -#define Ent_resume_msgin2a 0x00000ae0 -#define Ent_resume_msgin2b 0x00000af0 -#define Ent_resume_msgin3a 0x00000be0 -#define Ent_resume_msgin3b 0x00000bf0 -#define Ent_resume_pmm 0x00000078 -#define Ent_resume_rej_ident 0x00000c20 -#define Ent_wait_disc1 0x000009d0 -#define Ent_wait_disc2 0x00000ad0 -#define Ent_wait_disc3 0x00000bd0 -#define Ent_wait_disc_complete 0x000000d0 +#define Ent_done_ident 0x00000048 +#define Ent_end_data_trans 0x00000900 +#define Ent_patch_input_data 0x000000e0 +#define Ent_patch_new_dsa 0x00000d10 +#define Ent_patch_output_data 0x000004f8 +#define Ent_reselect 0x00000cd0 +#define Ent_resume_cmd 0x00000060 +#define Ent_resume_msgin1a 0x00000a08 +#define Ent_resume_msgin1b 0x00000a18 +#define 
Ent_resume_msgin2a 0x00000b38 +#define Ent_resume_msgin2b 0x00000b48 +#define Ent_resume_msgin3a 0x00000c68 +#define Ent_resume_msgin3b 0x00000c78 +#define Ent_resume_pmm 0x00000070 +#define Ent_resume_rej_ident 0x00000ca8 +#define Ent_test1 0x00000d40 +#define Ent_wait_disc1 0x000009f8 +#define Ent_wait_disc2 0x00000b28 +#define Ent_wait_disc3 0x00000c58 +#define Ent_wait_disc_complete 0x000000c8 static u32 LABELPATCHES[] __attribute((unused)) = { + 0x00000005, 0x00000007, - 0x00000009, + 0x00000013, 0x00000015, - 0x00000017, + 0x0000001d, 0x0000001f, 0x00000021, 0x00000023, - 0x00000025, - 0x0000013d, + 0x0000013b, + 0x00000241, 0x00000243, - 0x00000245, + 0x0000024b, 0x0000024d, 0x0000024f, 0x00000251, - 0x00000253, - 0x00000259, - 0x00000261, - 0x00000269, + 0x00000257, + 0x0000025f, + 0x00000265, + 0x00000273, + 0x00000277, 0x0000027b, - 0x00000287, - 0x0000028d, - 0x0000028f, + 0x00000285, 0x00000291, - 0x00000293, + 0x00000297, 0x00000299, - 0x000002a1, - 0x000002a9, - 0x000002bb, + 0x0000029b, + 0x0000029d, + 0x000002a3, + 0x000002ab, + 0x000002b1, + 0x000002bf, + 0x000002c3, 0x000002c7, - 0x000002cd, - 0x000002cf, 0x000002d1, - 0x000002d3, - 0x000002d9, - 0x000002e1, + 0x000002dd, + 0x000002e3, + 0x000002e5, + 0x000002e7, 0x000002e9, - 0x000002fb, - 0x00000307, - 0x00000311, - 0x00000315, - 0x0000031f, - 0x0000032b, + 0x000002ef, + 0x000002f7, + 0x000002fd, + 0x0000030b, + 0x0000030f, + 0x00000313, + 0x0000031d, + 0x00000329, + 0x00000333, + 0x00000337, + 0x00000341, + 0x0000034d, }; static struct { @@ -2356,6 +2447,6 @@ } EXTERNAL_PATCHES[] __attribute((unused)) = { }; -static u32 INSTRUCTIONS __attribute((unused)) = 407; -static u32 PATCHES __attribute((unused)) = 42; +static u32 INSTRUCTIONS __attribute((unused)) = 426; +static u32 PATCHES __attribute((unused)) = 51; static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/ac97_codec.c linux.21pre4-ac2/drivers/sound/ac97_codec.c --- linux.21pre4/drivers/sound/ac97_codec.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/ac97_codec.c 2003-01-31 11:02:32.000000000 +0000 @@ -133,6 +133,7 @@ {0x43525931, "Cirrus Logic CS4299 rev A", &crystal_digital_ops}, {0x43525933, "Cirrus Logic CS4299 rev C", &crystal_digital_ops}, {0x43525934, "Cirrus Logic CS4299 rev D", &crystal_digital_ops}, + {0x44543031, "Diamond Technology DT0893", &default_ops}, {0x45838308, "ESS Allegro ES1988", &null_ops}, {0x49434511, "ICE1232", &null_ops}, /* I hope --jk */ {0x4e534331, "National Semiconductor LM4549", &null_ops}, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/Config.in linux.21pre4-ac2/drivers/sound/Config.in --- linux.21pre4/drivers/sound/Config.in 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/Config.in 2003-02-01 16:30:27.000000000 +0000 @@ -175,6 +175,7 @@ dep_tristate ' 100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support' CONFIG_SOUND_SB $CONFIG_SOUND_OSS dep_tristate ' AWE32 synth' CONFIG_SOUND_AWE32_SYNTH $CONFIG_SOUND_OSS + dep_tristate ' XpressAudio Sound Blaster emulation' CONFIG_SOUND_KAHLUA $CONFIG_SOUND_SB $CONFIG_SOUND_OSS dep_tristate ' Full support for Turtle Beach WaveFront (Tropez Plus, Tropez, Maui) synth/soundcards' CONFIG_SOUND_WAVEFRONT $CONFIG_SOUND_OSS m dep_tristate ' Limited support for Turtle Beach Wave Front (Maui, Tropez) synthesizers' CONFIG_SOUND_MAUI $CONFIG_SOUND_OSS if [ "$CONFIG_SOUND_MAUI" = "y" ]; then diff -u 
--exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/i810_audio.c linux.21pre4-ac2/drivers/sound/i810_audio.c --- linux.21pre4/drivers/sound/i810_audio.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/i810_audio.c 2003-01-11 17:27:22.000000000 +0000 @@ -66,6 +66,9 @@ * * This driver is cursed. (Ben LaHaise) * + * ICH 3 caveats + * Intel errata #7 for ICH3 IO. We need to disable SMI stuff + * when codec probing. [Not Yet Done] * * ICH 4 caveats * diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/kahlua.c linux.21pre4-ac2/drivers/sound/kahlua.c --- linux.21pre4/drivers/sound/kahlua.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/drivers/sound/kahlua.c 2003-02-04 17:12:42.000000000 +0000 @@ -0,0 +1,230 @@ +/* + * Initialisation code for Cyrix/NatSemi VSA1 softaudio + * + * (C) Copyright 2003 Red Hat Inc + * + * XpressAudio(tm) is used on the Cyrix MediaGX (now NatSemi Geode) systems. + * The older version (VSA1) provides fairly good soundblaster emulation + * although there are a couple of bugs: large DMA buffers break record, + * and the MPU event handling seems suspect. VSA2 allows the native driver + * to control the AC97 audio engine directly and requires a different driver. + * + * Thanks to National Semiconductor for providing the needed information + * on the XpressAudio(tm) internals. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * For the avoidance of doubt the "preferred form" of this code is one which + * is in an open non patent encumbered format. Where cryptographic key signing + * forms part of the process of creating an executable the information + * including keys needed to generate an equivalently functional executable + * are deemed to be part of the source code. + * + * TO DO: + * Investigate whether we can portably support Cognac (5520) in the + * same manner. + */ + +#include +#include +#include +#include + +#include "sound_config.h" + +#include "sb.h" + +/* + * Read a soundblaster compatible mixer register. + * In this case we are actually reading an SMI trap + * not real hardware. 
+ */ + +static u8 __devinit mixer_read(unsigned long io, u8 reg) +{ + outb(reg, io + 4); + udelay(20); + reg = inb(io + 5); + udelay(20); + return reg; +} + +static int __devinit probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct address_info *hw_config; + unsigned long base; + void *mem; + unsigned long io; + u16 map; + u8 irq, dma8, dma16; + int oldquiet; + extern int sb_be_quiet; + + base = pci_resource_start(pdev, 0); + if(base == 0UL) + return 1; + + mem = ioremap(base, 128); + if(mem == 0UL) + return 1; + map = readw(mem + 0x18); /* Read the SMI enables */ + iounmap(mem); + + /* Map bits + 0:1 * 0x20 + 0x200 = sb base + 2 sb enable + 3 adlib enable + 5 MPU enable 0x330 + 6 MPU enable 0x300 + + The other bits may be used internally so must be masked */ + + io = 0x220 + 0x20 * (map & 3); + + if(map & (1<<2)) + printk(KERN_INFO "kahlua: XpressAudio at 0x%lx\n", io); + else + return 1; + + if(map & (1<<5)) + printk(KERN_INFO "kahlua: MPU at 0x300\n"); + else if(map & (1<<6)) + printk(KERN_INFO "kahlua: MPU at 0x330\n"); + + irq = mixer_read(io, 0x80) & 0x0F; + dma8 = mixer_read(io, 0x81); + + // printk("IRQ=%x MAP=%x DMA=%x\n", irq, map, dma8); + + if(dma8 & 0x20) + dma16 = 5; + else if(dma8 & 0x40) + dma16 = 6; + else if(dma8 & 0x80) + dma16 = 7; + else + { + printk(KERN_ERR "kahlua: No 16bit DMA enabled.\n"); + return 1; + } + + if(dma8 & 0x01) + dma8 = 0; + else if(dma8 & 0x02) + dma8 = 1; + else if(dma8 & 0x08) + dma8 = 3; + else + { + printk(KERN_ERR "kahlua: No 8bit DMA enabled.\n"); + return 1; + } + + if(irq & 1) + irq = 9; + else if(irq & 2) + irq = 5; + else if(irq & 4) + irq = 7; + else if(irq & 8) + irq = 10; + else + { + printk(KERN_ERR "kahlua: SB IRQ not set.\n"); + return 1; + } + + printk(KERN_INFO "kahlua: XpressAudio on IRQ %d, DMA %d, %d\n", + irq, dma8, dma16); + + hw_config = kmalloc(sizeof(struct address_info), GFP_KERNEL); + if(hw_config == NULL) + { + printk(KERN_ERR "kahlua: out of memory.\n"); + return 1; + } + memset(hw_config, 0, sizeof(*hw_config)); + + pci_set_drvdata(pdev, hw_config); + + hw_config->io_base = io; + hw_config->irq = irq; + hw_config->dma = dma8; + hw_config->dma2 = dma16; + hw_config->name = "Cyrix XpressAudio"; + hw_config->driver_use_1 = SB_NO_MIDI | SB_PCI_IRQ; + + if(sb_dsp_detect(hw_config, 0, 0, NULL)==0) + { + printk(KERN_ERR "kahlua: audio not responding.\n"); + return 1; + } + + oldquiet = sb_be_quiet; + sb_be_quiet = 1; + if(sb_dsp_init(hw_config, THIS_MODULE)) + { + sb_be_quiet = oldquiet; + pci_set_drvdata(pdev, NULL); + kfree(hw_config); + return 1; + } + sb_be_quiet = oldquiet; + + return 0; +} + +static void __exit remove_one(struct pci_dev *pdev) +{ + struct address_info *hw_config = pci_get_drvdata(pdev); + sb_dsp_unload(hw_config, 0); + pci_set_drvdata(pdev, NULL); + kfree(hw_config); +} + +MODULE_AUTHOR("Alan Cox"); +MODULE_DESCRIPTION("Kahlua VSA1 PCI Audio"); +MODULE_LICENSE("GPL"); + +/* + * 5530 only. The 5510/5520 decode is different. 
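[Aside on the resource decode in probe_one() above: the 16-bit SMI enable word read from BAR0+0x18 carries everything the driver needs. A minimal sketch of that decode follows; the helper name is invented for illustration, while the bit meanings and the 0x220 + 0x20*(map & 3) base come from the code above (note the in-code comment says 0x200 where the code actually uses 0x220; the sketch follows the code).]

	/* Illustration only, not part of the patch: decode of the SMI enable
	 * word as consumed by probe_one() above.  Helper name is hypothetical. */
	static unsigned long kahlua_sb_base(u16 map)
	{
		if (!(map & (1 << 2)))		/* bit 2: SB emulation enabled */
			return 0;
		/* bits 0:1 select the port block: 0x220, 0x240, 0x260 or 0x280 */
		return 0x220 + 0x20 * (map & 3);
	}
	/* bit 3 enables AdLib, bits 5 and 6 select the MPU-401 port;
	 * the remaining bits are internal and must be masked off. */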
+ */ + +static struct pci_device_id id_tbl[] __devinitdata = { + { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } +}; + +MODULE_DEVICE_TABLE(pci, id_tbl); + +static struct pci_driver kahlua_driver = { + name: "kahlua", + id_table: id_tbl, + probe: probe_one, + remove: __devexit_p(remove_one), +}; + + +static int __init kahlua_init_module(void) +{ + printk(KERN_INFO "Cyrix Kahlua VSA1 XpressAudio support (c) Copyright 2003 Red Hat Inc\n"); + return pci_module_init(&kahlua_driver); +} + +static void __exit kahlua_cleanup_module(void) +{ + return pci_unregister_driver(&kahlua_driver); +} + + +module_init(kahlua_init_module); +module_exit(kahlua_cleanup_module); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/Makefile linux.21pre4-ac2/drivers/sound/Makefile --- linux.21pre4/drivers/sound/Makefile 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/Makefile 2003-02-01 16:31:20.000000000 +0000 @@ -33,6 +33,7 @@ obj-$(CONFIG_SOUND_MSS) += ad1848.o obj-$(CONFIG_SOUND_PAS) += pas2.o sb.o sb_lib.o uart401.o obj-$(CONFIG_SOUND_SB) += sb.o sb_lib.o uart401.o +obj-$(CONFIG_SOUND_KAHLUA) += kahlua.o obj-$(CONFIG_SOUND_WAVEFRONT) += wavefront.o obj-$(CONFIG_SOUND_MAUI) += maui.o mpu401.o obj-$(CONFIG_SOUND_MPU401) += mpu401.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/sb_mixer.c linux.21pre4-ac2/drivers/sound/sb_mixer.c --- linux.21pre4/drivers/sound/sb_mixer.c 2003-01-29 16:26:50.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/sb_mixer.c 2003-01-12 00:32:36.000000000 +0000 @@ -479,9 +479,9 @@ regimageL |= sb16_recmasks_L[i]; regimageR |= sb16_recmasks_R[i]; } - sb_setmixer (devc, SB16_IMASK_L, regimageL); - sb_setmixer (devc, SB16_IMASK_R, regimageR); } + sb_setmixer (devc, SB16_IMASK_L, regimageL); + sb_setmixer (devc, SB16_IMASK_R, regimageR); } break; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/trident.c linux.21pre4-ac2/drivers/sound/trident.c --- linux.21pre4/drivers/sound/trident.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/trident.c 2003-02-04 14:58:24.000000000 +0000 @@ -223,7 +223,7 @@ #define TRIDENT_STATE_MAGIC 0x63657373 /* "cess" */ #define TRIDENT_DMA_MASK 0x3fffffff /* DMA buffer mask for pci_alloc_consist */ -#define ALI_DMA_MASK 0xffffffff /* ALI Tridents lack the 30-bit limitation */ +#define ALI_DMA_MASK 0x7fffffff /* ALI Tridents have 31-bit DMA. Wow. */ #define NR_HW_CH 32 @@ -3942,9 +3942,8 @@ return 0; udelay(5000); } - - printk(KERN_ERR "ALi 5451 did not come out of reset.\n"); - return 1; + /* This is non fatal if you have a non PM capable codec.. */ + return 0; } /* AC97 codec initialisation. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/sound/via82cxxx_audio.c linux.21pre4-ac2/drivers/sound/via82cxxx_audio.c --- linux.21pre4/drivers/sound/via82cxxx_audio.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/sound/via82cxxx_audio.c 2003-01-10 17:29:26.000000000 +0000 @@ -146,6 +146,12 @@ #define VIA_INTR_FM ((1<<2) | (1<<6) | (1<<10)) #define VIA_INTR_MASK (VIA_INTR_OUT | VIA_INTR_IN | VIA_INTR_FM) +/* Newer VIA we need to monitor the low 3 bits of each channel. 
This + mask covers the channels we don't yet use as well + */ + +#define VIA_NEW_INTR_MASK 0x77077777UL + /* VIA_BASE0_AUDIO_xxx_CHAN_TYPE bits */ #define VIA_IRQ_ON_FLAG (1<<0) /* int on each flagged scatter block */ #define VIA_IRQ_ON_EOL (1<<1) /* int at end of scatter list */ @@ -300,12 +306,20 @@ unsigned legacy: 1; /* Has legacy ports */ unsigned intmask: 1; /* Needs int bits */ unsigned sixchannel: 1; /* 8233/35 with 6 channel support */ + unsigned volume: 1; int locked_rate : 1; + + int mixer_vol; /* 8233/35 volume - not yet implemented */ struct semaphore syscall_sem; struct semaphore open_sem; + /* The 8233/8235 have 4 DX audio channels, two record and + one six channel out. We bind ch_in to DX 1, ch_out to multichannel + and ch_fm to DX 2. DX 3 and REC0/REC1 are unused at the + moment */ + struct via_channel ch_in; struct via_channel ch_out; struct via_channel ch_fm; @@ -1555,7 +1569,30 @@ rc = via_syscall_down (card, nonblock); if (rc) goto out; - + +#if 0 + /* + * Intercept volume control on 8233 and 8235 + */ + if(card->volume) + { + switch(cmd) + { + case SOUND_MIXER_READ_VOLUME: + return card->mixer_vol; + case SOUND_MIXER_WRITE_VOLUME: + { + int v; + if(get_user(v, (int *)arg)) + { + rc = -EFAULT; + goto out; + } + card->mixer_vol = v; + } + } + } +#endif rc = codec->mixer_ioctl(codec, cmd, arg); up (&card->syscall_sem); @@ -1893,7 +1930,16 @@ static void via_new_interrupt(int irq, void *dev_id, struct pt_regs *regs) { struct via_info *card = dev_id; + u32 status32; + /* to minimize interrupt sharing costs, we use the SGD status + * shadow register to check the status of all inputs and + * outputs with a single 32-bit bus read. If no interrupt + * conditions are flagged, we exit immediately + */ + status32 = inl (card->baseaddr + VIA_BASE0_SGD_STATUS_SHADOW); + if (!(status32 & VIA_NEW_INTR_MASK)) + return; /* * goes away completely on UP */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/devio.c linux.21pre4-ac2/drivers/usb/devio.c --- linux.21pre4/drivers/usb/devio.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/devio.c 2003-01-06 17:29:33.000000000 +0000 @@ -43,7 +43,7 @@ #include #include #include - +#include struct async { struct list_head asynclist; @@ -1078,6 +1078,8 @@ int size; void *buf = 0; int retval = 0; + struct usb_interface *ifp = 0; + struct usb_driver *driver = 0; /* get input parameters and alloc buffer */ if (copy_from_user(&ctrl, (void *) arg, sizeof (ctrl))) @@ -1095,32 +1097,55 @@ } } - /* ioctl to device */ - if (ctrl.ifno < 0) { - switch (ctrl.ioctl_code) { - /* access/release token for issuing control messages - * ask a particular driver to bind/unbind, ... 
etc - */ - } - retval = -ENOSYS; - - /* ioctl to the driver which has claimed a given interface */ - } else { - struct usb_interface *ifp = 0; - if (!ps->dev) - retval = -ENODEV; - else if (ctrl.ifno >= ps->dev->actconfig->bNumInterfaces) + if (!ps->dev) + retval = -ENODEV; + else if (!(ifp = usb_ifnum_to_if (ps->dev, ctrl.ifno))) + retval = -EINVAL; + else switch (ctrl.ioctl_code) { + + /* disconnect kernel driver from interface, leaving it unbound */ + case USBDEVFS_DISCONNECT: + driver = ifp->driver; + if (driver) { + down (&driver->serialize); + dbg ("disconnect '%s' from dev %d interface %d", + driver->name, ps->dev->devnum, ctrl.ifno); + driver->disconnect (ps->dev, ifp->private_data); + usb_driver_release_interface (driver, ifp); + up (&driver->serialize); + } else retval = -EINVAL; - else { - if (!(ifp = usb_ifnum_to_if (ps->dev, ctrl.ifno))) - retval = -EINVAL; - else if (ifp->driver == 0 || ifp->driver->ioctl == 0) - retval = -ENOSYS; - } - if (retval == 0) + break; + + /* let kernel drivers try to (re)bind to the interface */ + case USBDEVFS_CONNECT: + usb_find_interface_driver_for_ifnum (ps->dev, ctrl.ifno); + break; + + /* talk directly to the interface's driver */ + default: + lock_kernel(); /* against module unload */ + driver = ifp->driver; + if (driver == 0 || driver->ioctl == 0) { + unlock_kernel(); + retval = -ENOSYS; + } else { + if (ifp->driver->owner) { + __MOD_INC_USE_COUNT(ifp->driver->owner); + unlock_kernel(); + } /* ifno might usefully be passed ... */ - retval = ifp->driver->ioctl (ps->dev, ctrl.ioctl_code, buf); + retval = driver->ioctl (ps->dev, ctrl.ioctl_code, buf); /* size = min_t(int, size, retval)? */ + if (ifp->driver->owner) { + __MOD_DEC_USE_COUNT(ifp->driver->owner); + } else { + unlock_kernel(); + } + } + + if (retval == -ENOIOCTLCMD) + retval = -ENOTTY; } /* cleanup and return */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/hcd/ehci-dbg.c linux.21pre4-ac2/drivers/usb/hcd/ehci-dbg.c --- linux.21pre4/drivers/usb/hcd/ehci-dbg.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/hcd/ehci-dbg.c 2003-02-04 15:00:43.000000000 +0000 @@ -287,7 +287,26 @@ default: tmp = '?'; break; \ }; tmp; }) -static void qh_lines (struct ehci_qh *qh, char **nextp, unsigned *sizep) +static inline char token_mark (u32 token) +{ + token = le32_to_cpu (token); + if (token & QTD_STS_ACTIVE) + return '*'; + if (token & QTD_STS_HALT) + return '-'; + if (QTD_PID (token) != 1 /* not IN: OUT or SETUP */ + || QTD_LENGTH (token) == 0) + return ' '; + /* tries to advance through hw_alt_next */ + return '/'; +} + +static void qh_lines ( + struct ehci_hcd *ehci, + struct ehci_qh *qh, + char **nextp, + unsigned *sizep +) { u32 scratch; u32 hw_curr; @@ -296,26 +315,49 @@ unsigned temp; unsigned size = *sizep; char *next = *nextp; + char mark; + mark = token_mark (qh->hw_token); + if (mark == '/') { /* qh_alt_next controls qh advance? */ + if ((qh->hw_alt_next & QTD_MASK) == ehci->async->hw_alt_next) + mark = '#'; /* blocked */ + else if (qh->hw_alt_next & cpu_to_le32 (0x01)) + mark = '.'; /* use hw_qtd_next */ + /* else alt_next points to some other qtd */ + } scratch = cpu_to_le32p (&qh->hw_info1); - hw_curr = cpu_to_le32p (&qh->hw_current); + hw_curr = (mark == '*') ? 
cpu_to_le32p (&qh->hw_current) : 0; temp = snprintf (next, size, - "qh/%p dev%d %cs ep%d %08x %08x (%08x %08x)", + "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", qh, scratch & 0x007f, speed_char (scratch), (scratch >> 8) & 0x000f, scratch, cpu_to_le32p (&qh->hw_info2), - hw_curr, cpu_to_le32p (&qh->hw_token)); + cpu_to_le32p (&qh->hw_token), mark, + (cpu_to_le32 (0x8000000) & qh->hw_token) + ? "data0" : "data1", + (cpu_to_le32p (&qh->hw_alt_next) >> 1) & 0x0f); size -= temp; next += temp; + /* hc may be modifying the list as we read it ... */ list_for_each (entry, &qh->qtd_list) { td = list_entry (entry, struct ehci_qtd, qtd_list); scratch = cpu_to_le32p (&td->hw_token); + mark = ' '; + if (hw_curr == td->qtd_dma) + mark = '*'; + else if (qh->hw_qtd_next == td->qtd_dma) + mark = '+'; + else if (QTD_LENGTH (scratch)) { + if (td->hw_alt_next == ehci->async->hw_alt_next) + mark = '#'; + else if (td->hw_alt_next != EHCI_LIST_END) + mark = '/'; + } temp = snprintf (next, size, - "\n\t%std/%p %s len=%d %08x urb %p", - (hw_curr == td->qtd_dma) ? "*" : "", - td, ({ char *tmp; + "\n\t%p%c%s len=%d %08x urb %p", + td, mark, ({ char *tmp; switch ((scratch>>8)&0x03) { case 0: tmp = "out"; break; case 1: tmp = "in"; break; @@ -325,17 +367,31 @@ (scratch >> 16) & 0x7fff, scratch, td->urb); + if (temp < 0) + temp = 0; + else if (size < temp) + temp = size; size -= temp; next += temp; + if (temp == size) + goto done; } temp = snprintf (next, size, "\n"); - *sizep = size - temp; - *nextp = next + temp; + if (temp < 0) + temp = 0; + else if (size < temp) + temp = size; + size -= temp; + next += temp; + +done: + *sizep = size; + *nextp = next; } static ssize_t -show_async (struct device *dev, char *buf, size_t count, loff_t off) +show_async (struct device *dev, char *buf) { struct pci_dev *pdev; struct ehci_hcd *ehci; @@ -344,38 +400,36 @@ char *next; struct ehci_qh *qh; - if (off != 0) - return 0; - pdev = container_of (dev, struct pci_dev, dev); ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd); next = buf; - size = count; + size = PAGE_SIZE; /* dumps a snapshot of the async schedule. * usually empty except for long-term bulk reads, or head. 
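[For readers unfamiliar with the sysfs churn in this hunk: the three debug attributes are being moved from the old show(dev, buf, count, off) prototype to the newer show(dev, buf) one, where the method fills at most one page and returns the byte count. A minimal sketch of that convention, with a placeholder attribute name and output, is below.]

	/* Sketch of the attribute style this file is converting to; the
	 * attribute name and output line are placeholders, not part of the driver. */
	static ssize_t show_example(struct device *dev, char *buf)
	{
		struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
		struct ehci_hcd *ehci = container_of(pci_get_drvdata(pdev),
						     struct ehci_hcd, hcd);

		/* bound output to one page; return bytes written, no offset bookkeeping */
		return snprintf(buf, PAGE_SIZE, "hcd state %d\n", ehci->hcd.state);
	}
	static DEVICE_ATTR(example, S_IRUGO, show_example, NULL);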
* one QH per line, and TDs we know about */ spin_lock_irqsave (&ehci->lock, flags); - for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) - qh_lines (qh, &next, &size); - if (ehci->reclaim) { + for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh) + qh_lines (ehci, qh, &next, &size); + if (ehci->reclaim && size > 0) { temp = snprintf (next, size, "\nreclaim =\n"); size -= temp; next += temp; - qh_lines (ehci->reclaim, &next, &size); + for (qh = ehci->reclaim; size > 0 && qh; qh = qh->reclaim) + qh_lines (ehci, qh, &next, &size); } spin_unlock_irqrestore (&ehci->lock, flags); - return count - size; + return PAGE_SIZE - size; } static DEVICE_ATTR (async, S_IRUGO, show_async, NULL); #define DBG_SCHED_LIMIT 64 static ssize_t -show_periodic (struct device *dev, char *buf, size_t count, loff_t off) +show_periodic (struct device *dev, char *buf) { struct pci_dev *pdev; struct ehci_hcd *ehci; @@ -385,8 +439,6 @@ char *next; unsigned i, tag; - if (off != 0) - return 0; if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC))) return 0; seen_count = 0; @@ -394,7 +446,7 @@ pdev = container_of (dev, struct pci_dev, dev); ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd); next = buf; - size = count; + size = PAGE_SIZE; temp = snprintf (next, size, "size = %d\n", ehci->periodic_size); size -= temp; @@ -436,7 +488,7 @@ scratch & 0x007f, (scratch >> 8) & 0x000f, p.qh->usecs, p.qh->c_usecs, - scratch >> 16); + 0x7ff & (scratch >> 16)); /* FIXME TD info too */ @@ -478,14 +530,14 @@ spin_unlock_irqrestore (&ehci->lock, flags); kfree (seen); - return count - size; + return PAGE_SIZE - size; } static DEVICE_ATTR (periodic, S_IRUGO, show_periodic, NULL); #undef DBG_SCHED_LIMIT static ssize_t -show_registers (struct device *dev, char *buf, size_t count, loff_t off) +show_registers (struct device *dev, char *buf) { struct pci_dev *pdev; struct ehci_hcd *ehci; @@ -495,20 +547,18 @@ static char fmt [] = "%*s\n"; static char label [] = ""; - if (off != 0) - return 0; - pdev = container_of (dev, struct pci_dev, dev); ehci = container_of (pci_get_drvdata (pdev), struct ehci_hcd, hcd); next = buf; - size = count; + size = PAGE_SIZE; spin_lock_irqsave (&ehci->lock, flags); /* Capability Registers */ i = readw (&ehci->caps->hci_version); - temp = snprintf (next, size, "EHCI %x.%02x, hcd state %d\n", + temp = snprintf (next, size, + "EHCI %x.%02x, hcd state %d (version " DRIVER_VERSION ")\n", i >> 8, i & 0x0ff, ehci->hcd.state); size -= temp; next += temp; @@ -578,7 +628,7 @@ spin_unlock_irqrestore (&ehci->lock, flags); - return count - size; + return PAGE_SIZE - size; } static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/hcd/ehci.h linux.21pre4-ac2/drivers/usb/hcd/ehci.h --- linux.21pre4/drivers/usb/hcd/ehci.h 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/hcd/ehci.h 2003-02-04 15:00:43.000000000 +0000 @@ -81,6 +81,7 @@ struct pci_pool *sitd_pool; /* sitd per split iso urb */ struct timer_list watchdog; + unsigned stamp; #ifdef EHCI_STATS struct ehci_stats stats; @@ -235,12 +236,12 @@ /* the rest is HCD-private */ dma_addr_t qtd_dma; /* qtd address */ struct list_head qtd_list; /* sw qtd list */ - - /* dma same in urb's qtds, except 1st control qtd (setup buffer) */ struct urb *urb; /* qtd's urb */ size_t length; /* length of buffer */ } __attribute__ ((aligned (32))); +#define QTD_MASK cpu_to_le32 (~0x1f) /* mask NakCnt+T in qh->hw_alt_next */ + 
/*-------------------------------------------------------------------------*/ /* type tag from {qh,itd,sitd,fstn}->hw_next */ @@ -304,13 +305,17 @@ union ehci_shadow qh_next; /* ptr to qh; or periodic */ struct list_head qtd_list; /* sw qtd list */ struct ehci_qtd *dummy; + struct ehci_qh *reclaim; /* next to reclaim */ atomic_t refcount; + unsigned stamp; u8 qh_state; #define QH_STATE_LINKED 1 /* HC sees this */ #define QH_STATE_UNLINK 2 /* HC may still see this */ #define QH_STATE_IDLE 3 /* HC doesn't see this */ +#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */ +#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */ /* periodic schedule info */ u8 usecs; /* intr bandwidth */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/hcd/ehci-hcd.c linux.21pre4-ac2/drivers/usb/hcd/ehci-hcd.c --- linux.21pre4/drivers/usb/hcd/ehci-hcd.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/hcd/ehci-hcd.c 2003-02-04 15:00:43.000000000 +0000 @@ -93,7 +93,7 @@ * 2001-June Works with usb-storage and NEC EHCI on 2.4 */ -#define DRIVER_VERSION "2002-Dec-20" +#define DRIVER_VERSION "2003-Jan-22" #define DRIVER_AUTHOR "David Brownell" #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver" @@ -107,14 +107,15 @@ #define EHCI_STATS #endif -#define INTR_AUTOMAGIC /* to be removed later in 2.5 */ +#define INTR_AUTOMAGIC /* urb lifecycle mode, gone in 2.5 */ /* magic numbers that can affect system performance */ #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ -#define EHCI_TUNE_RL_HS 0 /* nak throttle; see 4.9 */ +#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ #define EHCI_TUNE_RL_TT 0 #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */ #define EHCI_TUNE_MULT_TT 1 +#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */ #define EHCI_WATCHDOG_JIFFIES (HZ/100) /* arbitrary; ~10 msec */ #define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */ @@ -406,9 +407,10 @@ * streaming mappings for I/O buffers, like pci_map_single(), * can return segments above 4GB, if the device allows. * - * NOTE: layered drivers can't yet tell when we enable that, - * so they can't pass this info along (like NETIF_F_HIGHDMA) - * (or like Scsi_Host.highmem_io) ... usb_bus.flags? + * NOTE: the dma mask is visible through dma_supported(), so + * drivers can pass this info along ... like NETIF_F_HIGHDMA, + * Scsi_Host.highmem_io, and so forth. It's readonly to all + * host side drivers though. 
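[The comment being rewritten in the next hunk concerns visibility of the controller's DMA mask to upper layers. As a hedged illustration of what a peripheral-side driver might do with that information; the helper and field names are the usual 2.4 ones recalled from memory, not taken from this patch, so verify them against the tree.]

	/* Illustration only: a driver deciding whether it can hand highmem
	 * buffers straight down.  Names are assumptions, not from this patch. */
	static int example_setup_dma(struct pci_dev *pdev, struct net_device *netdev)
	{
		if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) == 0) {
			netdev->features |= NETIF_F_HIGHDMA;	/* 64-bit capable path */
			return 0;
		}
		/* otherwise fall back to the conventional 32-bit mask */
		return pci_set_dma_mask(pdev, 0xffffffffULL);
	}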
*/ if (HCC_64BIT_ADDR (hcc_params)) { writel (0, &ehci->regs->segment); @@ -416,13 +418,26 @@ ehci_info (ehci, "enabled 64bit PCI DMA\n"); } + /* help hc dma work well with cachelines */ + pci_set_mwi (ehci->hcd.pdev); + /* clear interrupt enables, set irq latency */ temp = readl (&ehci->regs->command) & 0xff; if (log2_irq_thresh < 0 || log2_irq_thresh > 6) log2_irq_thresh = 0; temp |= 1 << (16 + log2_irq_thresh); // if hc can park (ehci >= 0.96), default is 3 packets per async QH - // keeping default periodic framelist size + if (HCC_PGM_FRAMELISTLEN (hcc_params)) { + /* periodic schedule size can be smaller than default */ + temp &= ~(3 << 2); + temp |= (EHCI_TUNE_FLS << 2); + switch (EHCI_TUNE_FLS) { + case 0: ehci->periodic_size = 1024; break; + case 1: ehci->periodic_size = 512; break; + case 2: ehci->periodic_size = 256; break; + default: BUG (); + } + } temp &= ~(CMD_IAAD | CMD_ASE | CMD_PSE), // Philips, Intel, and maybe others need CMD_RUN before the // root hub will detect new devices (why?); NEC doesn't @@ -476,7 +491,6 @@ ehci_ready (ehci); ehci_reset (ehci); bus->root_hub = 0; - usb_free_dev (udev); retval = -ENODEV; goto done2; } @@ -637,9 +651,13 @@ static void ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); - u32 status = readl (&ehci->regs->status); + u32 status; int bh; + spin_lock (&ehci->lock); + + status = readl (&ehci->regs->status); + /* e.g. cardbus physical eject */ if (status == ~(u32) 0) { ehci_dbg (ehci, "device removed\n"); @@ -648,9 +666,7 @@ status &= INTR_MASK; if (!status) /* irq sharing? */ - return; - - spin_lock (&ehci->lock); + goto done; /* clear (just) interrupts */ writel (status, &ehci->regs->status); @@ -693,6 +709,7 @@ if (bh) ehci_work (ehci, regs); +done: spin_unlock (&ehci->lock); } @@ -756,7 +773,6 @@ struct ehci_hcd *ehci = hcd_to_ehci (hcd); struct ehci_qh *qh; unsigned long flags; - int maybe_irq = 1; spin_lock_irqsave (&ehci->lock, flags); switch (usb_pipetype (urb->pipe)) { @@ -766,23 +782,23 @@ qh = (struct ehci_qh *) urb->hcpriv; if (!qh) break; - while (qh->qh_state == QH_STATE_LINKED + + /* if we need to use IAA and it's busy, defer */ + if (qh->qh_state == QH_STATE_LINKED && ehci->reclaim && HCD_IS_RUNNING (ehci->hcd.state) ) { - spin_unlock_irqrestore (&ehci->lock, flags); + struct ehci_qh *last; - if (maybe_irq) { - if (in_interrupt ()) - return -EAGAIN; - maybe_irq = 0; - } - /* let pending unlinks complete, so this can start */ - wait_ms (1); + for (last = ehci->reclaim; + last->reclaim; + last = last->reclaim) + continue; + qh->qh_state = QH_STATE_UNLINK_WAIT; + last->reclaim = qh; - spin_lock_irqsave (&ehci->lock, flags); - } - if (!HCD_IS_RUNNING (ehci->hcd.state) && ehci->reclaim) + /* bypass IAA if the hc can't care */ + } else if (!HCD_IS_RUNNING (ehci->hcd.state) && ehci->reclaim) end_unlink_async (ehci, NULL); /* something else might have unlinked the qh by now */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/hcd/ehci-mem.c linux.21pre4-ac2/drivers/usb/hcd/ehci-mem.c --- linux.21pre4/drivers/usb/hcd/ehci-mem.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/hcd/ehci-mem.c 2003-02-04 15:00:43.000000000 +0000 @@ -75,8 +75,6 @@ qtd = pci_pool_alloc (ehci->qtd_pool, flags, &dma); if (qtd != 0) { ehci_qtd_init (qtd, dma); - if (ehci->async) - qtd->hw_alt_next = ehci->async->hw_alt_next; } return qtd; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/hcd/ehci-q.c 
linux.21pre4-ac2/drivers/usb/hcd/ehci-q.c --- linux.21pre4/drivers/usb/hcd/ehci-q.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/hcd/ehci-q.c 2003-02-04 15:00:43.000000000 +0000 @@ -43,7 +43,8 @@ /* fill a qtd, returning how much of the buffer we were able to queue up */ static int -qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token) +qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, + int token, int maxpacket) { int i, count; u64 addr = buf; @@ -69,6 +70,10 @@ else count = len; } + + /* short packets may only terminate transfers */ + if (count != len) + count -= (count % maxpacket); } qtd->hw_token = cpu_to_le32 ((count << 16) | token); qtd->length = count; @@ -85,7 +90,7 @@ { qh->hw_current = 0; qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma); - qh->hw_alt_next = ehci->async->hw_alt_next; + qh->hw_alt_next = EHCI_LIST_END; /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ wmb (); @@ -96,7 +101,7 @@ #define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1) -static inline void qtd_copy_status ( +static void qtd_copy_status ( struct ehci_hcd *ehci, struct urb *urb, size_t length, @@ -256,14 +261,26 @@ static unsigned qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs) { - struct ehci_qtd *last = 0; + struct ehci_qtd *last = 0, *end = qh->dummy; struct list_head *entry, *tmp; - int stopped = 0; + int stopped; unsigned count = 0; + int do_status = 0; + u8 state; if (unlikely (list_empty (&qh->qtd_list))) return count; + /* completions (or tasks on other cpus) must never clobber HALT + * till we've gone through and cleaned everything up, even when + * they add urbs to this qh's queue or mark them for unlinking. + * + * NOTE: unlinking expects to be done in queue order. + */ + state = qh->qh_state; + qh->qh_state = QH_STATE_COMPLETING; + stopped = (state == QH_STATE_IDLE); + /* remove de-activated QTDs from front of queue. * after faults (including short reads), cleanup this urb * then let the queue advance. @@ -287,11 +304,14 @@ last = 0; } + /* ignore urbs submitted during completions we reported */ + if (qtd == end) + break; + /* hardware copies qtd out of qh overlay */ rmb (); token = le32_to_cpu (qtd->hw_token); stopped = stopped - || (qh->qh_state == QH_STATE_IDLE) || (HALT_BIT & qh->hw_token) != 0 || (ehci->hcd.state == USB_STATE_HALT); @@ -301,36 +321,53 @@ /* magic dummy for short reads; won't advance */ if (IS_SHORT_READ (token) && !(token & QTD_STS_HALT) - && ehci->async->hw_alt_next - == qh->hw_alt_next) + && (qh->hw_alt_next & QTD_MASK) + == ehci->async->hw_alt_next) { + stopped = 1; goto halt; + } /* stop scanning when we reach qtds the hc is using */ } else if (likely (!stopped)) { - last = 0; break; } else { - /* ignore active qtds unless some previous qtd + /* ignore active urbs unless some previous qtd * for the urb faulted (including short read) or * its urb was canceled. we may patch qh or qtds. 
*/ - if ((token & QTD_STS_ACTIVE) - && urb->status == -EINPROGRESS) { - last = 0; + if (likely (urb->status == -EINPROGRESS)) + continue; + + /* issue status after short control reads */ + if (unlikely (do_status != 0) + && QTD_PID (token) == 0 /* OUT */) { + do_status = 0; continue; } + + /* token in overlay may be most current */ + if (state == QH_STATE_IDLE + && cpu_to_le32 (qtd->qtd_dma) + == qh->hw_current) + token = le32_to_cpu (qh->hw_token); + + /* force halt for unlinked or blocked qh, so we'll + * patch the qh later and so that completions can't + * activate it while we "know" it's stopped. + */ if ((HALT_BIT & qh->hw_token) == 0) { halt: qh->hw_token |= HALT_BIT; wmb (); - stopped = 1; } } /* remove it from the queue */ spin_lock (&urb->lock); qtd_copy_status (ehci, urb, qtd->length, token); + do_status = (urb->status == -EREMOTEIO) + && usb_pipecontrol (urb->pipe); spin_unlock (&urb->lock); if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { @@ -349,6 +386,9 @@ ehci_qtd_free (ehci, last); } + /* restore original state; caller must unlink or relink */ + qh->qh_state = state; + /* update qh after fault cleanup */ if (unlikely ((HALT_BIT & qh->hw_token) != 0)) { qh_update (ehci, qh, @@ -397,7 +437,7 @@ struct ehci_qtd *qtd, *qtd_prev; dma_addr_t buf; int len, maxpacket; - int is_input, status_patch = 0; + int is_input; u32 token; /* @@ -418,7 +458,7 @@ if (usb_pipecontrol (urb->pipe)) { /* SETUP pid */ qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest), - token | (2 /* "setup" */ << 8)); + token | (2 /* "setup" */ << 8), 8); /* ... and always at least one more pid */ token ^= QTD_TOGGLE; @@ -429,10 +469,6 @@ qtd->urb = urb; qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma); list_add_tail (&qtd->qtd_list, head); - - if (len > 0 && is_input - && !(urb->transfer_flags & URB_SHORT_NOT_OK)) - status_patch = 1; } /* @@ -443,6 +479,7 @@ else buf = 0; + // FIXME this 'buf' check break some zlps... if (!buf || is_input) token |= (1 /* "in" */ << 8); /* else it's already initted to "out" pid (0 << 8) */ @@ -457,9 +494,11 @@ for (;;) { int this_qtd_len; - this_qtd_len = qtd_fill (qtd, buf, len, token); + this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket); len -= this_qtd_len; buf += this_qtd_len; + if (is_input) + qtd->hw_alt_next = ehci->async->hw_alt_next; /* qh makes control packets use qtd toggle; maybe switch it */ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) @@ -477,6 +516,13 @@ list_add_tail (&qtd->qtd_list, head); } + /* unless the bulk/interrupt caller wants a chance to clean + * up after short reads, hc should advance qh past this urb + */ + if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 + || usb_pipecontrol (urb->pipe))) + qtd->hw_alt_next = EHCI_LIST_END; + /* * control requests may need a terminating data "status" ack; * bulk ones may need a terminating short packet (zero length). @@ -503,23 +549,10 @@ list_add_tail (&qtd->qtd_list, head); /* never any data in such packets */ - qtd_fill (qtd, 0, 0, token); + qtd_fill (qtd, 0, 0, token, 0); } } - /* if we're permitting a short control read, we want the hardware to - * just continue after short data and send the status ack. it can do - * that on the last data packet (typically the only one). for other - * packets, software fixup is needed (in qh_completions). 
- */ - if (status_patch) { - struct ehci_qtd *prev; - - prev = list_entry (qtd->qtd_list.prev, - struct ehci_qtd, qtd_list); - prev->hw_alt_next = QTD_NEXT (qtd->qtd_dma); - } - /* by default, enable interrupt on urb completion */ if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC); @@ -641,7 +674,8 @@ case USB_SPEED_FULL: /* EPS 0 means "full" */ - info1 |= (EHCI_TUNE_RL_TT << 28); + if (type != PIPE_INTERRUPT) + info1 |= (EHCI_TUNE_RL_TT << 28); if (type == PIPE_CONTROL) { info1 |= (1 << 27); /* for TT */ info1 |= 1 << 14; /* toggle from qtd */ @@ -658,12 +692,13 @@ case USB_SPEED_HIGH: /* no TT involved */ info1 |= (2 << 12); /* EPS "high" */ - info1 |= (EHCI_TUNE_RL_HS << 28); if (type == PIPE_CONTROL) { + info1 |= (EHCI_TUNE_RL_HS << 28); info1 |= 64 << 16; /* usb2 fixed maxpacket */ info1 |= 1 << 14; /* toggle from qtd */ info2 |= (EHCI_TUNE_MULT_HS << 30); } else if (type == PIPE_BULK) { + info1 |= (EHCI_TUNE_RL_HS << 28); info1 |= 512 << 16; /* usb2 fixed maxpacket */ info2 |= (EHCI_TUNE_MULT_HS << 30); } else { /* PIPE_INTERRUPT */ @@ -799,8 +834,7 @@ && !usb_pipecontrol (urb->pipe)) { /* "never happens": drivers do stall cleanup right */ if (qh->qh_state != QH_STATE_IDLE - && (cpu_to_le32 (QTD_STS_HALT) - & qh->hw_token) == 0) + && qh->qh_state != QH_STATE_COMPLETING) ehci_warn (ehci, "clear toggle dev%d " "ep%d%s: not idle\n", usb_pipedevice (urb->pipe), @@ -839,7 +873,6 @@ __list_splice (qtd_list, qh->qtd_list.prev); ehci_qtd_init (qtd, qtd->qtd_dma); - qtd->hw_alt_next = ehci->async->hw_alt_next; qh->dummy = qtd; /* hc must see the new dummy at list end */ @@ -907,9 +940,12 @@ /* the async qh for the qtds being reclaimed are now unlinked from the HC */ +static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh); + static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs) { struct ehci_qh *qh = ehci->reclaim; + struct ehci_qh *next; del_timer (&ehci->watchdog); @@ -920,6 +956,10 @@ ehci->reclaim = 0; ehci->reclaim_ready = 0; + /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ + next = qh->reclaim; + qh->reclaim = 0; + qh_completions (ehci, qh, regs); if (!list_empty (&qh->qtd_list) @@ -939,6 +979,9 @@ jiffies + EHCI_ASYNC_JIFFIES); } } + + if (next) + start_unlink_async (ehci, next); } /* makes sure the async qh will become idle */ @@ -951,7 +994,8 @@ #ifdef DEBUG if (ehci->reclaim - || qh->qh_state != QH_STATE_LINKED + || (qh->qh_state != QH_STATE_LINKED + && qh->qh_state != QH_STATE_UNLINK_WAIT) #ifdef CONFIG_SMP // this macro lies except on SMP compiles || !spin_is_locked (&ehci->lock) @@ -983,6 +1027,9 @@ wmb (); if (unlikely (ehci->hcd.state == USB_STATE_HALT)) { + /* if (unlikely (qh->reclaim != 0)) + * this will recurse, probably not much + */ end_unlink_async (ehci, NULL); return; } @@ -1001,25 +1048,28 @@ scan_async (struct ehci_hcd *ehci, struct pt_regs *regs) { struct ehci_qh *qh; - unsigned count; + if (!++(ehci->stamp)) + ehci->stamp++; rescan: qh = ehci->async->qh_next.qh; - count = 0; if (likely (qh != 0)) { do { /* clean any finished work for this qh */ - if (!list_empty (&qh->qtd_list)) { + if (!list_empty (&qh->qtd_list) + && qh->stamp != ehci->stamp) { int temp; /* unlinks could happen here; completion - * reporting drops the lock. + * reporting drops the lock. rescan using + * the latest schedule, but don't rescan + * qhs we already finished (no looping). 
*/ qh = qh_get (qh); + qh->stamp = ehci->stamp; temp = qh_completions (ehci, qh, regs); qh_put (ehci, qh); if (temp != 0) { - count += temp; goto rescan; } } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/usb/usb.c linux.21pre4-ac2/drivers/usb/usb.c --- linux.21pre4/drivers/usb/usb.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/drivers/usb/usb.c 2003-01-06 17:29:40.000000000 +0000 @@ -164,6 +164,26 @@ } } +/* + * usb_ifnum_to_ifpos - convert the interface _number_ (as in interface.bInterfaceNumber) + * to the interface _position_ (as in dev->actconfig->interface + position) + * @dev: the device to use + * @ifnum: the interface number (bInterfaceNumber); not interface position + * + * Note that the number is the same as the position for all interfaces _except_ + * devices with interfaces not sequentially numbered (e.g., 0, 2, 3, etc). + */ +int usb_ifnum_to_ifpos(struct usb_device *dev, unsigned ifnum) +{ + int i; + + for (i = 0; i < dev->actconfig->bNumInterfaces; i++) + if (dev->actconfig->interface[i].altsetting[0].bInterfaceNumber == ifnum) + return i; + + return -EINVAL; +} + /** * usb_deregister - unregister a USB driver * @driver: USB operations of the driver to unregister @@ -546,7 +566,6 @@ iface->private_data = NULL; } - /** * usb_match_id - find first usb_device_id matching device or interface * @dev: the device whose descriptors are considered when matching @@ -759,6 +778,23 @@ return -1; } +/* + * usb_find_interface_driver_for_ifnum - convert ifnum to ifpos via + * usb_ifnum_to_ifpos and call usb_find_interface_driver(). + * @dev: the device to use + * @ifnum: the interface number (bInterfaceNumber); not interface position! + * + * Note usb_find_interface_driver's ifnum parameter is actually interface position. + */ +int usb_find_interface_driver_for_ifnum(struct usb_device *dev, unsigned int ifnum) +{ + int ifpos = usb_ifnum_to_ifpos(dev, ifnum); + + if (0 > ifpos) + return -EINVAL; + + return usb_find_interface_driver(dev, ifpos); +} #ifdef CONFIG_HOTPLUG @@ -2382,6 +2418,7 @@ * into the kernel, and other device drivers are built as modules, * then these symbols need to be exported for the modules to use. 
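[Since usb_ifnum_to_ifpos() is easy to confuse with the position-based lookups it wraps, here is a short usage sketch built only from what the usb.c hunk above defines; the wrapper function itself is hypothetical.]

	/* Usage sketch: translate bInterfaceNumber to an array index before
	 * dereferencing actconfig->interface[].  Wrapper name is hypothetical. */
	static struct usb_interface *lookup_iface(struct usb_device *dev, unsigned ifnum)
	{
		int ifpos = usb_ifnum_to_ifpos(dev, ifnum);

		if (ifpos < 0)
			return NULL;
		/* number == position except for sparsely numbered interfaces */
		return &dev->actconfig->interface[ifpos];
	}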
*/ +EXPORT_SYMBOL(usb_ifnum_to_ifpos); EXPORT_SYMBOL(usb_ifnum_to_if); EXPORT_SYMBOL(usb_epnum_to_ep_desc); @@ -2396,6 +2433,7 @@ EXPORT_SYMBOL(usb_free_dev); EXPORT_SYMBOL(usb_inc_dev_use); +EXPORT_SYMBOL(usb_find_interface_driver_for_ifnum); EXPORT_SYMBOL(usb_driver_claim_interface); EXPORT_SYMBOL(usb_interface_claimed); EXPORT_SYMBOL(usb_driver_release_interface); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/video/aty/atyfb_base.c linux.21pre4-ac2/drivers/video/aty/atyfb_base.c --- linux.21pre4/drivers/video/aty/atyfb_base.c 2003-01-29 16:26:59.000000000 +0000 +++ linux.21pre4-ac2/drivers/video/aty/atyfb_base.c 2003-01-06 17:41:28.000000000 +0000 @@ -360,6 +360,7 @@ /* 3D RAGE Mobility */ { 0x4c4d, 0x4c4d, 0x00, 0x00, m64n_mob_p, 230, 50, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS }, + { 0x4c52, 0x4c52, 0x00, 0x00, m64n_mob_p, 230, 40, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS | M64F_MAGIC_POSTDIV | M64F_SDRAM_MAGIC_PLL | M64F_XL_DLL }, { 0x4c4e, 0x4c4e, 0x00, 0x00, m64n_mob_a, 230, 50, M64F_GT | M64F_INTEGRATED | M64F_RESET_3D | M64F_GTB_DSP | M64F_MOBIL_BUS }, #endif /* CONFIG_FB_ATY_CT */ }; @@ -438,7 +439,7 @@ #endif /* defined(CONFIG_PPC) */ -#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_PMAC_BACKLIGHT) +#if defined(CONFIG_PMAC_PBOOK) || defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_CT_VAIO_LCD) static void aty_st_lcd(int index, u32 val, const struct fb_info_aty *info) { unsigned long temp; @@ -460,7 +461,7 @@ /* read the register value */ return aty_ld_le32(LCD_DATA, info); } -#endif /* CONFIG_PMAC_PBOOK || CONFIG_PMAC_BACKLIGHT */ +#endif /* CONFIG_PMAC_PBOOK || CONFIG_PMAC_BACKLIGHT || CONFIG_FB_ATY_CT_VAIO_LCD */ /* ------------------------------------------------------------------------- */ @@ -1772,6 +1773,9 @@ #if defined(CONFIG_PPC) int sense; #endif +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + u32 pm, hs; +#endif u8 pll_ref_div; info->aty_cmap_regs = (struct aty_cmap_regs *)(info->ati_regbase+0xc0); @@ -2089,6 +2093,35 @@ var = default_var; #endif /* !__sparc__ */ #endif /* !CONFIG_PPC */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + /* Power Management */ + pm=aty_ld_lcd(POWER_MANAGEMENT, info); + pm=(pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_PCI; + pm|=PWR_MGT_ON; + aty_st_lcd(POWER_MANAGEMENT, pm, info); + udelay(10); + + /* OVR_WID_LEFT_RIGHT */ + hs=aty_ld_le32(OVR_WID_LEFT_RIGHT,info); + hs= 0x00000000; + aty_st_le32(OVR_WID_LEFT_RIGHT, hs, info); + udelay(10); + + /* CONFIG_PANEL */ + hs=aty_ld_lcd(CONFIG_PANEL,info); + hs|=DONT_SHADOW_HEND ; + aty_st_lcd(CONFIG_PANEL, hs, info); + udelay(10); + +#if defined(DEBUG) + printk("LCD_INDEX CONFIG_PANEL LCD_GEN_CTRL POWER_MANAGEMENT\n" + "%08x %08x %08x %08x\n", + aty_ld_le32(LCD_INDEX, info), + aty_ld_lcd(CONFIG_PANEL, info), + aty_ld_lcd(LCD_GEN_CTRL, info), + aty_ld_lcd(POWER_MANAGEMENT, info), +#endif /* DEBUG */ +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ #endif /* !MODULE */ if (noaccel) var.accel_flags &= ~FB_ACCELF_TEXT; @@ -2714,6 +2747,23 @@ /* * Blank the display. 
*/ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) +static int set_backlight_enable(int on, struct fb_info_aty *info) +{ + unsigned int reg = aty_ld_lcd(POWER_MANAGEMENT, info); + if(on) { + reg=(reg & ~SUSPEND_NOW) | PWR_BLON; + } else { + reg=(reg & ~PWR_BLON) | SUSPEND_NOW; + } + aty_st_lcd(POWER_MANAGEMENT, reg, info); + udelay(10); +#ifdef DEBUG + printk(KERN_INFO "set_backlight_enable(%i): %08x\n", on, aty_ld_lcd(POWER_MANAGEMENT, info) ); +#endif + return 0; +} +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ static void atyfbcon_blank(int blank, struct fb_info *fb) { @@ -2725,6 +2775,9 @@ set_backlight_enable(0); #endif /* CONFIG_PMAC_BACKLIGHT */ +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + set_backlight_enable(!blank, info); +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ gen_cntl = aty_ld_8(CRTC_GEN_CNTL, info); if (blank > 0) switch (blank-1) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/video/aty/mach64_ct.c linux.21pre4-ac2/drivers/video/aty/mach64_ct.c --- linux.21pre4/drivers/video/aty/mach64_ct.c 2003-01-29 16:26:59.000000000 +0000 +++ linux.21pre4-ac2/drivers/video/aty/mach64_ct.c 2003-01-06 17:41:28.000000000 +0000 @@ -178,11 +178,14 @@ } pll->pll_gen_cntl |= mpostdiv<<4; /* mclk */ - if (M64_HAS(MAGIC_POSTDIV)) - pll->pll_ext_cntl = 0; - else +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) pll->pll_ext_cntl = mpostdiv; /* xclk == mclk */ - +#else + if ( M64_HAS(MAGIC_POSTDIV) ) + pll->pll_ext_cntl = 0; + else + pll->pll_ext_cntl = mpostdiv; /* xclk == mclk */ +#endif switch (pll->vclk_post_div_real) { case 2: vpostdiv = 1; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/video/aty/mach64.h linux.21pre4-ac2/drivers/video/aty/mach64.h --- linux.21pre4/drivers/video/aty/mach64.h 2003-01-29 16:26:59.000000000 +0000 +++ linux.21pre4-ac2/drivers/video/aty/mach64.h 2003-01-06 17:41:28.000000000 +0000 @@ -1148,6 +1148,8 @@ #define APC_LUT_MN 0x39 #define APC_LUT_OP 0x3A +/* Values in CONFIG_PANEL */ +#define DONT_SHADOW_HEND 0x00004000 /* Values in LCD_MISC_CNTL */ #define BIAS_MOD_LEVEL_MASK 0x0000ff00 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/video/Config.in linux.21pre4-ac2/drivers/video/Config.in --- linux.21pre4/drivers/video/Config.in 2003-01-29 16:26:56.000000000 +0000 +++ linux.21pre4-ac2/drivers/video/Config.in 2003-01-06 17:41:53.000000000 +0000 @@ -124,22 +124,34 @@ if [ "$CONFIG_FB_MATROX" != "n" ]; then bool ' Millennium I/II support' CONFIG_FB_MATROX_MILLENIUM bool ' Mystique support' CONFIG_FB_MATROX_MYSTIQUE - bool ' G100/G200/G400/G450/G550 support' CONFIG_FB_MATROX_G100 + bool ' G100/G200/G400/G450/G550 support' CONFIG_FB_MATROX_G450 + if [ "$CONFIG_FB_MATROX_G450" = "n" ]; then + bool ' G100/G200/G400 support' CONFIG_FB_MATROX_G100A + fi + if [ "$CONFIG_FB_MATROX_G450" = "y" -o "$CONFIG_FB_MATROX_G100A" = "y" ]; then + define_bool CONFIG_FB_MATROX_G100 y + fi if [ "$CONFIG_I2C" != "n" ]; then dep_tristate ' Matrox I2C support' CONFIG_FB_MATROX_I2C $CONFIG_FB_MATROX $CONFIG_I2C_ALGOBIT if [ "$CONFIG_FB_MATROX_G100" = "y" ]; then dep_tristate ' G400 second head support' CONFIG_FB_MATROX_MAVEN $CONFIG_FB_MATROX_I2C fi fi - dep_tristate ' G450/G550 second head support (mandatory for G550)' CONFIG_FB_MATROX_G450 $CONFIG_FB_MATROX_G100 + dep_tristate ' Matrox /proc interface' CONFIG_FB_MATROX_PROC $CONFIG_FB_MATROX bool ' Multihead support' CONFIG_FB_MATROX_MULTIHEAD fi tristate ' ATI Mach64 display support (EXPERIMENTAL)' CONFIG_FB_ATY if [ "$CONFIG_FB_ATY" != "n" ]; then bool ' 
Mach64 GX support (EXPERIMENTAL)' CONFIG_FB_ATY_GX bool ' Mach64 CT/VT/GT/LT (incl. 3D RAGE) support' CONFIG_FB_ATY_CT + if [ "$CONFIG_FB_ATY_CT" = "y" ]; then + bool ' Sony Vaio C1VE 1024x480 LCD support' CONFIG_FB_ATY_CT_VAIO_LCD + fi fi tristate ' ATI Radeon display support (EXPERIMENTAL)' CONFIG_FB_RADEON +# if [ "$CONFIG_FB_RADEON" = "y" ]; then +# bool ' Sony Vaio C1MV 1280x600 LCD support' CONFIG_FB_RADEON_VAIO_LCD +# fi tristate ' ATI Rage128 display support (EXPERIMENTAL)' CONFIG_FB_ATY128 tristate ' SIS acceleration (EXPERIMENTAL)' CONFIG_FB_SIS if [ "$CONFIG_FB_SIS" != "n" ]; then @@ -306,7 +318,7 @@ "$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_IGA" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_PM2" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_P9100" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_3DFX" = "m" -o \ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_CYBER2000" = "m" -o \ @@ -344,7 +356,7 @@ "$CONFIG_FB_VALKYRIE" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_CT65550" = "m" -o "$CONFIG_FB_MATROX" = "m" -o \ "$CONFIG_FB_PM2" = "m" -o "$CONFIG_FB_SGIVW" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_SA1100" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ @@ -389,12 +401,12 @@ "$CONFIG_FB_CONTROL" = "m" -o "$CONFIG_FB_CLGEN" = "m" -o \ "$CONFIG_FB_TGA" = "m" -o "$CONFIG_FB_PLATINUM" = "m" -o \ "$CONFIG_FB_MATROX" = "m" -o "$CONFIG_FB_PM2" = "m" -o \ - "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "y" -o \ + "$CONFIG_FB_PM3" = "m" -o "$CONFIG_FB_TRIDENT" = "m" -o \ "$CONFIG_FB_RIVA" = "m" -o "$CONFIG_FB_ATY128" = "m" -o \ "$CONFIG_FB_3DFX" = "m" -o "$CONFIG_FB_RADEON" = "m" -o \ "$CONFIG_FB_SGIVW" = "m" -o "$CONFIG_FB_SIS" = "m" -o \ "$CONFIG_FB_PVR2" = "m" -o "$CONFIG_FB_VOODOO1" = "m" -o \ - "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_STI" = "y" ]; then + "$CONFIG_FB_CYBER2000" = "m" -o "$CONFIG_FB_STI" = "m" ]; then define_tristate CONFIG_FBCON_CFB32 m fi fi @@ -441,9 +453,9 @@ define_tristate CONFIG_FBCON_HGA m fi fi - if [ "$CONFIG_FB_STI" = "y" ]; then - define_tristate CONFIG_FBCON_STI y - fi + fi + if [ "$CONFIG_FB_STI" = "y" ]; then + define_tristate CONFIG_FBCON_STI y fi bool ' Support only 8 pixels wide fonts' CONFIG_FBCON_FONTWIDTH8_ONLY if [ "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" ]; then diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/video/modedb.c linux.21pre4-ac2/drivers/video/modedb.c --- linux.21pre4/drivers/video/modedb.c 2003-01-29 16:26:56.000000000 +0000 +++ linux.21pre4-ac2/drivers/video/modedb.c 2003-01-06 17:42:01.000000000 +0000 @@ -42,6 +42,20 @@ #define DEFAULT_MODEDB_INDEX 0 static struct fb_videomode modedb[] __initdata = { +#if defined(CONFIG_FB_ATY_CT_VAIO_LCD) + { + /* 1024x480 @ 65 Hz */ + NULL, 65, 1024, 480, 25203, 24, 24, 1, 17, 144, 4, + 0, FB_VMODE_NONINTERLACED + }, +#endif /* CONFIG_FB_ATY_CT_VAIO_LCD */ +#if defined(CONFIG_FB_RADEON_VAIO_LCD) + { + /* 1280x600 @ 72 Hz, 45.288 kHz hsync */ + NULL, 72, 1280, 600, 13940, 24, 24, 23, 1, 256, 5, + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED + }, +#endif /* CONFIG_FB_RADEON_VAIO_LCD */ { /* 640x400 @ 70 Hz, 31.5 kHz hsync */ NULL, 70, 640, 
400, 39721, 40, 24, 39, 9, 96, 2, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/drivers/video/pmagb-b-fb.h linux.21pre4-ac2/drivers/video/pmagb-b-fb.h --- linux.21pre4/drivers/video/pmagb-b-fb.h 2003-01-29 16:26:59.000000000 +0000 +++ linux.21pre4-ac2/drivers/video/pmagb-b-fb.h 2003-01-08 15:30:06.000000000 +0000 @@ -4,7 +4,7 @@ * TurboChannel PMAGB-B framebuffer card support, * Copyright (C) 1999, 2000, 2001 by * Michael Engel and - * Karsten Merker + * Karsten Merker * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/binfmt_elf.c linux.21pre4-ac2/fs/binfmt_elf.c --- linux.21pre4/fs/binfmt_elf.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/binfmt_elf.c 2003-01-06 19:14:41.000000000 +0000 @@ -1143,7 +1143,7 @@ psinfo.pr_state = i; psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i]; psinfo.pr_zomb = psinfo.pr_sname == 'Z'; - psinfo.pr_nice = current->nice; + psinfo.pr_nice = task_nice(current); psinfo.pr_flag = current->flags; psinfo.pr_uid = NEW_TO_OLD_UID(current->uid); psinfo.pr_gid = NEW_TO_OLD_GID(current->gid); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/block_dev.c linux.21pre4-ac2/fs/block_dev.c --- linux.21pre4/fs/block_dev.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/block_dev.c 2003-01-06 22:06:17.000000000 +0000 @@ -131,8 +131,9 @@ return 0; } -static int blkdev_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) +static int blkdev_direct_IO(int rw, struct file * filp, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, blkdev_get_block); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/buffer.c linux.21pre4-ac2/fs/buffer.c --- linux.21pre4/fs/buffer.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/fs/buffer.c 2003-01-07 00:38:38.000000000 +0000 @@ -323,7 +323,7 @@ lock_kernel(); sync_inodes_sb(sb); - DQUOT_SYNC(dev); + DQUOT_SYNC_SB(sb); lock_super(sb); if (sb->s_dirt && sb->s_op && sb->s_op->write_super) sb->s_op->write_super(sb); @@ -347,7 +347,7 @@ lock_kernel(); sync_inodes(dev); - DQUOT_SYNC(dev); + DQUOT_SYNC_DEV(dev); sync_supers(dev, 1); unlock_kernel(); @@ -735,6 +735,7 @@ bh->b_list = BUF_CLEAN; bh->b_end_io = handler; bh->b_private = private; + bh->b_journal_head = NULL; } static void end_buffer_io_async(struct buffer_head * bh, int uptodate) @@ -2784,7 +2785,7 @@ hash_table = (struct buffer_head **) __get_free_pages(GFP_ATOMIC, order); } while (hash_table == NULL && --order > 0); - printk("Buffer-cache hash table entries: %d (order: %d, %ld bytes)\n", + printk(KERN_INFO "Buffer cache hash table entries: %d (order: %d, %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); if (!hash_table) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/Config.in linux.21pre4-ac2/fs/Config.in --- linux.21pre4/fs/Config.in 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/Config.in 2003-01-06 23:21:40.000000000 +0000 @@ -5,6 +5,14 @@ comment 'File systems' bool 'Quota support' CONFIG_QUOTA +dep_tristate ' Old quota format support' CONFIG_QFMT_V1 $CONFIG_QUOTA +dep_tristate ' VFS v0 quota format support' CONFIG_QFMT_V2 
$CONFIG_QUOTA +dep_mbool ' Compatible quota interfaces' CONFIG_QIFACE_COMPAT $CONFIG_QUOTA +if [ "$CONFIG_QUOTA" = "y" -a "$CONFIG_QIFACE_COMPAT" = "y" ]; then + choice ' Compatible quota interfaces' \ + "Original CONFIG_QIFACE_V1 \ + VFSv0 CONFIG_QIFACE_V2" Original +fi tristate 'Kernel automounter support' CONFIG_AUTOFS_FS tristate 'Kernel automounter version 4 support (also supports v3)' CONFIG_AUTOFS4_FS @@ -102,6 +110,7 @@ dep_tristate 'InterMezzo file system support (replicating fs) (EXPERIMENTAL)' CONFIG_INTERMEZZO_FS $CONFIG_INET $CONFIG_EXPERIMENTAL dep_tristate 'NFS file system support' CONFIG_NFS_FS $CONFIG_INET dep_mbool ' Provide NFSv3 client support' CONFIG_NFS_V3 $CONFIG_NFS_FS + dep_mbool ' Allow direct I/O on NFS files (EXPERIMENTAL)' CONFIG_NFS_DIRECTIO $CONFIG_NFS_FS $CONFIG_EXPERIMENTAL dep_bool ' Root file system on NFS' CONFIG_ROOT_NFS $CONFIG_NFS_FS $CONFIG_IP_PNP dep_tristate 'NFS server support' CONFIG_NFSD $CONFIG_INET diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/dquot.c linux.21pre4-ac2/fs/dquot.c --- linux.21pre4/fs/dquot.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/dquot.c 2003-01-06 23:21:47.000000000 +0000 @@ -35,7 +35,7 @@ * Jan Kara, , sponsored by SuSE CR, 10-11/99 * * Used struct list_head instead of own list struct - * Invalidation of dquots with dq_count > 0 no longer possible + * Invalidation of referenced dquots is no longer possible * Improved free_dquots list management * Quota and i_blocks are now updated in one place to avoid races * Warnings are now delayed so we won't block in critical section @@ -45,6 +45,10 @@ * Added dynamic quota structure allocation * Jan Kara 12/2000 * + * Rewritten quota interface. Implemented new quota format and + * formats registering. + * Jan Kara, , 2001,2002 + * * (C) Copyright 1994 - 1997 Marco van Wieringen */ @@ -59,20 +63,53 @@ #include #include #include +#include #include #include +#include +#include #include -#define __DQUOT_VERSION__ "dquot_6.4.0" +static char *quotatypes[] = INITQFNAMES; +static struct quota_format_type *quota_formats; /* List of registered formats */ -int nr_dquots, nr_free_dquots; +int register_quota_format(struct quota_format_type *fmt) +{ + lock_kernel(); + fmt->qf_next = quota_formats; + quota_formats = fmt; + unlock_kernel(); + return 0; +} -static char *quotatypes[] = INITQFNAMES; +void unregister_quota_format(struct quota_format_type *fmt) +{ + struct quota_format_type **actqf; -static inline struct quota_mount_options *sb_dqopt(struct super_block *sb) + lock_kernel(); + for (actqf = "a_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); + if (*actqf) + *actqf = (*actqf)->qf_next; + unlock_kernel(); +} + +static struct quota_format_type *find_quota_format(int id) { - return &sb->s_dquot; + struct quota_format_type *actqf; + + lock_kernel(); + for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); + if (actqf && !try_inc_mod_count(actqf->qf_owner)) + actqf = NULL; + unlock_kernel(); + return actqf; +} + +static void put_quota_format(struct quota_format_type *fmt) +{ + if (fmt->qf_owner) + __MOD_DEC_USE_COUNT(fmt->qf_owner); } /* @@ -85,11 +122,11 @@ * list is used for the sync and invalidate operations, which must look * at every dquot. * - * Unused dquots (dq_count == 0) are added to the free_dquots list when - * freed, and this list is searched whenever we need an available dquot. 
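[To see how the registration hooks added at the top of this dquot.c rework are meant to be consumed, a skeleton quota-format module might look roughly like the following. Only register_quota_format(), unregister_quota_format(), qf_fmt_id and qf_owner appear in this patch; the id value and everything else are placeholders.]

	/* Sketch only, not part of the patch: skeleton of a quota format
	 * module using the registration hooks added above. */
	#define QFMT_EXAMPLE_ID 1	/* placeholder format id */

	static struct quota_format_type example_format = {
		qf_fmt_id:	QFMT_EXAMPLE_ID,
		qf_owner:	THIS_MODULE,
	};

	static int __init init_example_quota_format(void)
	{
		return register_quota_format(&example_format);
	}

	static void __exit exit_example_quota_format(void)
	{
		unregister_quota_format(&example_format);
	}

	module_init(init_example_quota_format);
	module_exit(exit_example_quota_format);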
- * Dquots are removed from the list as soon as they are used again, and - * nr_free_dquots gives the number of dquots on the list. When dquot is - * invalidated it's completely released from memory. + * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, + * and this list is searched whenever we need an available dquot. Dquots are + * removed from the list as soon as they are used again, and + * dqstats.free_dquots gives the number of dquots on the list. When + * dquot is invalidated it's completely released from memory. * * Dquots with a specific identity (device, type and id) are placed on * one of the dquot_hash[] hash chains. The provides an efficient search @@ -115,35 +152,39 @@ static LIST_HEAD(free_dquots); static struct list_head dquot_hash[NR_DQHASH]; -static struct dqstats dqstats; +struct dqstats dqstats; static void dqput(struct dquot *); static struct dquot *dqduplicate(struct dquot *); -static inline char is_enabled(struct quota_mount_options *dqopt, short type) +static inline void get_dquot_ref(struct dquot *dquot) { - switch (type) { - case USRQUOTA: - return((dqopt->flags & DQUOT_USR_ENABLED) != 0); - case GRPQUOTA: - return((dqopt->flags & DQUOT_GRP_ENABLED) != 0); - } - return(0); + dquot->dq_count++; +} + +static inline void put_dquot_ref(struct dquot *dquot) +{ + dquot->dq_count--; +} + +static inline void get_dquot_dup_ref(struct dquot *dquot) +{ + dquot->dq_dup_ref++; } -static inline char sb_has_quota_enabled(struct super_block *sb, short type) +static inline void put_dquot_dup_ref(struct dquot *dquot) { - return is_enabled(sb_dqopt(sb), type); + dquot->dq_dup_ref--; } -static inline int const hashfn(kdev_t dev, unsigned int id, short type) +static inline int const hashfn(struct super_block *sb, unsigned int id, int type) { - return((HASHDEV(dev) ^ id) * (MAXQUOTAS - type)) % NR_DQHASH; + return((((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type)) % NR_DQHASH; } static inline void insert_dquot_hash(struct dquot *dquot) { - struct list_head *head = dquot_hash + hashfn(dquot->dq_dev, dquot->dq_id, dquot->dq_type); + struct list_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); list_add(&dquot->dq_hash, head); } @@ -153,14 +194,14 @@ INIT_LIST_HEAD(&dquot->dq_hash); } -static inline struct dquot *find_dquot(unsigned int hashent, kdev_t dev, unsigned int id, short type) +static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) { struct list_head *head; struct dquot *dquot; for (head = dquot_hash[hashent].next; head != dquot_hash+hashent; head = head->next) { dquot = list_entry(head, struct dquot, dq_hash); - if (dquot->dq_dev == dev && dquot->dq_id == id && dquot->dq_type == type) + if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) return dquot; } return NODQUOT; @@ -170,14 +211,14 @@ static inline void put_dquot_head(struct dquot *dquot) { list_add(&dquot->dq_free, &free_dquots); - nr_free_dquots++; + dqstats.free_dquots++; } /* Add a dquot to the tail of the free list */ static inline void put_dquot_last(struct dquot *dquot) { list_add(&dquot->dq_free, free_dquots.prev); - nr_free_dquots++; + dqstats.free_dquots++; } /* Move dquot to the head of free list (it must be already on it) */ @@ -193,7 +234,7 @@ return; list_del(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_free); - nr_free_dquots--; + dqstats.free_dquots--; } static inline void put_inuse(struct dquot *dquot) @@ -201,12 +242,12 @@ /* We add to the back of inuse list so we 
don't have to restart * when traversing this list and we block */ list_add(&dquot->dq_inuse, inuse_list.prev); - nr_dquots++; + dqstats.allocated_dquots++; } static inline void remove_inuse(struct dquot *dquot) { - nr_dquots--; + dqstats.allocated_dquots--; list_del(&dquot->dq_inuse); } @@ -243,6 +284,7 @@ wake_up(&dquot->dq_wait_lock); } +/* Wait for dquot to be unused */ static void __wait_dquot_unused(struct dquot *dquot) { DECLARE_WAITQUEUE(wait, current); @@ -258,85 +300,56 @@ current->state = TASK_RUNNING; } -/* - * We don't have to be afraid of deadlocks as we never have quotas on quota files... - */ -static void write_dquot(struct dquot *dquot) +/* Wait for all duplicated dquot references to be dropped */ +static void __wait_dup_drop(struct dquot *dquot) { - short type = dquot->dq_type; - struct file *filp; - mm_segment_t fs; - loff_t offset; - ssize_t ret; - struct semaphore *sem = &dquot->dq_sb->s_dquot.dqio_sem; - struct dqblk dqbuf; - - down(sem); - filp = dquot->dq_sb->s_dquot.files[type]; - offset = dqoff(dquot->dq_id); - fs = get_fs(); - set_fs(KERNEL_DS); + DECLARE_WAITQUEUE(wait, current); - /* - * Note: clear the DQ_MOD flag unconditionally, - * so we don't loop forever on failure. - */ - memcpy(&dqbuf, &dquot->dq_dqb, sizeof(struct dqblk)); - dquot->dq_flags &= ~DQ_MOD; - ret = 0; - if (filp) - ret = filp->f_op->write(filp, (char *)&dqbuf, - sizeof(struct dqblk), &offset); - if (ret != sizeof(struct dqblk)) - printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", - kdevname(dquot->dq_dev)); - - set_fs(fs); - up(sem); - dqstats.writes++; -} - -static void read_dquot(struct dquot *dquot) -{ - short type = dquot->dq_type; - struct file *filp; - mm_segment_t fs; - loff_t offset; + add_wait_queue(&dquot->dq_wait_free, &wait); +repeat: + set_current_state(TASK_UNINTERRUPTIBLE); + if (dquot->dq_dup_ref) { + schedule(); + goto repeat; + } + remove_wait_queue(&dquot->dq_wait_free, &wait); + current->state = TASK_RUNNING; +} - filp = dquot->dq_sb->s_dquot.files[type]; - if (filp == (struct file *)NULL) - return; +static int read_dqblk(struct dquot *dquot) +{ + int ret; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); lock_dquot(dquot); - if (!dquot->dq_sb) /* Invalidated quota? */ - goto out_lock; - /* Now we are sure filp is valid - the dquot isn't invalidated */ - down(&dquot->dq_sb->s_dquot.dqio_sem); - offset = dqoff(dquot->dq_id); - fs = get_fs(); - set_fs(KERNEL_DS); - filp->f_op->read(filp, (char *)&dquot->dq_dqb, sizeof(struct dqblk), &offset); - up(&dquot->dq_sb->s_dquot.dqio_sem); - set_fs(fs); - - if (dquot->dq_bhardlimit == 0 && dquot->dq_bsoftlimit == 0 && - dquot->dq_ihardlimit == 0 && dquot->dq_isoftlimit == 0) - dquot->dq_flags |= DQ_FAKE; - dqstats.reads++; -out_lock: + down(&dqopt->dqio_sem); + ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); + up(&dqopt->dqio_sem); unlock_dquot(dquot); + return ret; +} + +static int commit_dqblk(struct dquot *dquot) +{ + int ret; + struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); + + down(&dqopt->dqio_sem); + ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); + up(&dqopt->dqio_sem); + return ret; } /* Invalidate all dquots on the list, wait for all users. Note that this function is called * after quota is disabled so no new quota might be created. As we only insert to the end of * inuse list, we don't have to restart searching... 
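The replacement hash above keys a dquot by its superblock pointer, id and quota type rather than by device number. A stand-alone sketch of that bucket calculation (not part of the patch; NR_DQHASH, L1_CACHE_SHIFT and MAXQUOTAS are assumed placeholder values here) is:

/* Sketch of the new dquot bucket calculation, user-space only. */
#include <stdio.h>

#define NR_DQHASH	43	/* assumed hash table size */
#define L1_CACHE_SHIFT	5	/* assumed cache-line shift */
#define MAXQUOTAS	2	/* USRQUOTA and GRPQUOTA */

static int hashfn(const void *sb, unsigned int id, int type)
{
	return ((((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) *
	        (MAXQUOTAS - type)) % NR_DQHASH;
}

int main(void)
{
	int sb;	/* stands in for a struct super_block */

	printf("uid 1000 -> bucket %d\n", hashfn(&sb, 1000, 0));
	printf("gid 1000 -> bucket %d\n", hashfn(&sb, 1000, 1));
	return 0;
}

Shifting the pointer right by the cache-line bits discards the low address bits, which are largely constant for slab-allocated superblocks, so they no longer bias the bucket choice.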
*/ -static void invalidate_dquots(struct super_block *sb, short type) +static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot; struct list_head *head; restart: - for (head = inuse_list.next; head != &inuse_list; head = head->next) { + list_for_each(head, &inuse_list) { dquot = list_entry(head, struct dquot, dq_inuse); if (dquot->dq_sb != sb) continue; @@ -359,37 +372,107 @@ } } -int sync_dquots(kdev_t dev, short type) +static int vfs_quota_sync(struct super_block *sb, int type) { struct list_head *head; struct dquot *dquot; + struct quota_info *dqopt = sb_dqopt(sb); + int cnt; - lock_kernel(); restart: - for (head = inuse_list.next; head != &inuse_list; head = head->next) { + list_for_each(head, &inuse_list) { dquot = list_entry(head, struct dquot, dq_inuse); - if (dev && dquot->dq_dev != dev) + if (sb && dquot->dq_sb != sb) continue; if (type != -1 && dquot->dq_type != type) continue; if (!dquot->dq_sb) /* Invalidated? */ continue; - if (!(dquot->dq_flags & (DQ_MOD | DQ_LOCKED))) + if (!dquot_dirty(dquot) && !(dquot->dq_flags & DQ_LOCKED)) continue; - /* Raise use count so quota won't be invalidated. We can't use dqduplicate() as it does too many tests */ - dquot->dq_count++; + /* Get reference to quota so it won't be invalidated. get_dquot_ref() + * is enough since if dquot is locked/modified it can't be + * on the free list */ + get_dquot_ref(dquot); if (dquot->dq_flags & DQ_LOCKED) wait_on_dquot(dquot); - if (dquot->dq_flags & DQ_MOD) - write_dquot(dquot); + if (dquot_dirty(dquot)) + commit_dqblk(dquot); dqput(dquot); goto restart; } + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt)) + dqopt->info[cnt].dqi_flags &= ~DQF_ANY_DQUOT_DIRTY; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) + if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt) && info_dirty(&dqopt->info[cnt])) + dqopt->ops[cnt]->write_file_info(sb, cnt); dqstats.syncs++; - unlock_kernel(); + return 0; } +static struct super_block *get_super_to_sync(int type) +{ + struct list_head *head; + int cnt, dirty; + +restart: + spin_lock(&sb_lock); + list_for_each(head, &super_blocks) { + struct super_block *sb = list_entry(head, struct super_block, s_list); + + for (cnt = 0, dirty = 0; cnt < MAXQUOTAS; cnt++) + if ((type == cnt || type == -1) && sb_has_quota_enabled(sb, cnt) + && sb_dqopt(sb)->info[cnt].dqi_flags & DQF_ANY_DQUOT_DIRTY) + dirty = 1; + if (!dirty) + continue; + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (!sb->s_root) { + drop_super(sb); + goto restart; + } + return sb; + } + spin_unlock(&sb_lock); + return NULL; +} + +void sync_dquots_dev(kdev_t dev, int type) +{ + struct super_block *sb; + + if (dev) { + if ((sb = get_super(dev))) { + lock_kernel(); + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); + unlock_kernel(); + drop_super(sb); + } + } + else { + while ((sb = get_super_to_sync(type))) { + lock_kernel(); + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); + unlock_kernel(); + drop_super(sb); + } + } +} + +void sync_dquots_sb(struct super_block *sb, int type) +{ + lock_kernel(); + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); + unlock_kernel(); +} + /* Free unused dquots from cache */ static void prune_dqcache(int count) { @@ -408,19 +491,38 @@ } } +/* + * This is called from kswapd when we think we need some + * more memory, but aren't really sure how much. So we + * carefully try to free a _bit_ of our dqcache, but not + * too much. 
+ * + * Priority: + * 1 - very urgent: shrink everything + * ... + * 6 - base-level: try to shrink a bit. + */ + int shrink_dqcache_memory(int priority, unsigned int gfp_mask) { + int count = 0; + lock_kernel(); - prune_dqcache(nr_free_dquots / (priority + 1)); + count = dqstats.free_dquots / priority; + prune_dqcache(count); unlock_kernel(); return kmem_cache_shrink(dquot_cachep); } -/* NOTE: If you change this function please check whether dqput_blocks() works right... */ +/* + * Put reference to dquot + * NOTE: If you change this function please check whether dqput_blocks() works right... + */ static void dqput(struct dquot *dquot) { if (!dquot) return; +#ifdef __DQUOT_PARANOIA if (!dquot->dq_count) { printk("VFS: dqput: trying to free free dquot\n"); printk("VFS: device %s, dquot of %s %d\n", @@ -428,33 +530,38 @@ dquot->dq_id); return; } +#endif dqstats.drops++; we_slept: + if (dquot->dq_dup_ref && dquot->dq_count - dquot->dq_dup_ref <= 1) { /* Last unduplicated reference? */ + __wait_dup_drop(dquot); + goto we_slept; + } if (dquot->dq_count > 1) { /* We have more than one user... We can simply decrement use count */ - dquot->dq_count--; + put_dquot_ref(dquot); return; } - if (dquot->dq_flags & DQ_MOD) { - write_dquot(dquot); + if (dquot_dirty(dquot)) { + commit_dqblk(dquot); goto we_slept; } /* sanity check */ if (!list_empty(&dquot->dq_free)) { printk(KERN_ERR "dqput: dquot already on free list??\n"); - dquot->dq_count--; /* J.K. Just decrementing use count seems safer... */ + put_dquot_ref(dquot); return; } - dquot->dq_count--; + put_dquot_ref(dquot); /* If dquot is going to be invalidated invalidate_dquots() is going to free it so */ if (!(dquot->dq_flags & DQ_INVAL)) put_dquot_last(dquot); /* Place at end of LRU free queue */ wake_up(&dquot->dq_wait_free); } -static struct dquot *get_empty_dquot(void) +static struct dquot *get_empty_dquot(struct super_block *sb, int type) { struct dquot *dquot; @@ -468,6 +575,9 @@ INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_LIST_HEAD(&dquot->dq_hash); + dquot->dq_sb = sb; + dquot->dq_dev = sb->s_dev; + dquot->dq_type = type; dquot->dq_count = 1; /* all dquots go on the inuse_list */ put_inuse(dquot); @@ -475,11 +585,11 @@ return dquot; } -static struct dquot *dqget(struct super_block *sb, unsigned int id, short type) +static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { - unsigned int hashent = hashfn(sb->s_dev, id, type); + unsigned int hashent = hashfn(sb, id, type); struct dquot *dquot, *empty = NODQUOT; - struct quota_mount_options *dqopt = sb_dqopt(sb); + struct quota_info *dqopt = sb_dqopt(sb); we_slept: if (!is_enabled(dqopt, type)) { @@ -488,23 +598,21 @@ return NODQUOT; } - if ((dquot = find_dquot(hashent, sb->s_dev, id, type)) == NODQUOT) { + if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { if (empty == NODQUOT) { - if ((empty = get_empty_dquot()) == NODQUOT) + if ((empty = get_empty_dquot(sb, type)) == NODQUOT) schedule(); /* Try to wait for a moment... 
*/ goto we_slept; } dquot = empty; - dquot->dq_id = id; - dquot->dq_type = type; - dquot->dq_dev = sb->s_dev; - dquot->dq_sb = sb; + dquot->dq_id = id; /* hash it first so it can be found */ insert_dquot_hash(dquot); - read_dquot(dquot); + read_dqblk(dquot); } else { - if (!dquot->dq_count++) + if (!dquot->dq_count) remove_free_dquot(dquot); + get_dquot_ref(dquot); dqstats.cache_hits++; wait_on_dquot(dquot); if (empty) @@ -516,30 +624,47 @@ dqput(dquot); return NODQUOT; } - dquot->dq_referenced++; + ++dquot->dq_referenced; dqstats.lookups++; return dquot; } +/* Duplicate reference to dquot got from inode */ static struct dquot *dqduplicate(struct dquot *dquot) { if (dquot == NODQUOT) return NODQUOT; - dquot->dq_count++; + get_dquot_ref(dquot); if (!dquot->dq_sb) { printk(KERN_ERR "VFS: dqduplicate(): Invalidated quota to be duplicated!\n"); - dquot->dq_count--; + put_dquot_ref(dquot); return NODQUOT; } if (dquot->dq_flags & DQ_LOCKED) printk(KERN_ERR "VFS: dqduplicate(): Locked quota to be duplicated!\n"); + get_dquot_dup_ref(dquot); dquot->dq_referenced++; dqstats.lookups++; + return dquot; } -static int dqinit_needed(struct inode *inode, short type) +/* Put duplicated reference */ +static void dqputduplicate(struct dquot *dquot) +{ + if (!dquot->dq_dup_ref) { + printk(KERN_ERR "VFS: dqputduplicate(): Duplicated dquot put without duplicate reference.\n"); + return; + } + put_dquot_dup_ref(dquot); + if (!dquot->dq_dup_ref) + wake_up(&dquot->dq_wait_free); + put_dquot_ref(dquot); + dqstats.drops++; +} + +static int dqinit_needed(struct inode *inode, int type) { int cnt; @@ -553,16 +678,13 @@ return 0; } -static void add_dquot_ref(struct super_block *sb, short type) +static void add_dquot_ref(struct super_block *sb, int type) { struct list_head *p; - if (!sb->dq_op) - return; /* nothing to do */ - restart: file_list_lock(); - for (p = sb->s_files.next; p != &sb->s_files; p = p->next) { + list_for_each(p, &sb->s_files) { struct file *filp = list_entry(p, struct file, f_list); struct inode *inode = filp->f_dentry->d_inode; if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) { @@ -582,13 +704,15 @@ /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ static inline int dqput_blocks(struct dquot *dquot) { - if (dquot->dq_count == 1) + if (dquot->dq_dup_ref && dquot->dq_count - dquot->dq_dup_ref <= 1) + return 1; + if (dquot->dq_count <= 1 && dquot->dq_flags & DQ_MOD) return 1; return 0; } /* Remove references to dquots from inode - add dquot to list for freeing if needed */ -int remove_inode_dquot_ref(struct inode *inode, short type, struct list_head *tofree_head) +int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head) { struct dquot *dquot = inode->i_dquot[type]; int cnt; @@ -635,38 +759,38 @@ static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number) { - dquot->dq_curinodes += number; - dquot->dq_flags |= DQ_MOD; + dquot->dq_dqb.dqb_curinodes += number; + mark_dquot_dirty(dquot); } -static inline void dquot_incr_blocks(struct dquot *dquot, unsigned long number) +static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) { - dquot->dq_curblocks += number; - dquot->dq_flags |= DQ_MOD; + dquot->dq_dqb.dqb_curspace += number; + mark_dquot_dirty(dquot); } static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number) { - if (dquot->dq_curinodes > number) - dquot->dq_curinodes -= number; + if (dquot->dq_dqb.dqb_curinodes > number) + dquot->dq_dqb.dqb_curinodes -= number; 
else - dquot->dq_curinodes = 0; - if (dquot->dq_curinodes < dquot->dq_isoftlimit) - dquot->dq_itime = (time_t) 0; + dquot->dq_dqb.dqb_curinodes = 0; + if (dquot->dq_dqb.dqb_curinodes < dquot->dq_dqb.dqb_isoftlimit) + dquot->dq_dqb.dqb_itime = (time_t) 0; dquot->dq_flags &= ~DQ_INODES; - dquot->dq_flags |= DQ_MOD; + mark_dquot_dirty(dquot); } -static inline void dquot_decr_blocks(struct dquot *dquot, unsigned long number) +static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) { - if (dquot->dq_curblocks > number) - dquot->dq_curblocks -= number; + if (dquot->dq_dqb.dqb_curspace > number) + dquot->dq_dqb.dqb_curspace -= number; else - dquot->dq_curblocks = 0; - if (dquot->dq_curblocks < dquot->dq_bsoftlimit) - dquot->dq_btime = (time_t) 0; + dquot->dq_dqb.dqb_curspace = 0; + if (toqb(dquot->dq_dqb.dqb_curspace) < dquot->dq_dqb.dqb_bsoftlimit) + dquot->dq_dqb.dqb_btime = (time_t) 0; dquot->dq_flags &= ~DQ_BLKS; - dquot->dq_flags |= DQ_MOD; + mark_dquot_dirty(dquot); } static inline int need_print_warning(struct dquot *dquot, int flag) @@ -739,7 +863,10 @@ static inline char ignore_hardlimit(struct dquot *dquot) { - return capable(CAP_SYS_RESOURCE) && !dquot->dq_sb->s_dquot.rsquash[dquot->dq_type]; + struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; + + return capable(CAP_SYS_RESOURCE) && + (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); } static int check_idq(struct dquot *dquot, ulong inodes, char *warntype) @@ -748,60 +875,60 @@ if (inodes <= 0 || dquot->dq_flags & DQ_FAKE) return QUOTA_OK; - if (dquot->dq_ihardlimit && - (dquot->dq_curinodes + inodes) > dquot->dq_ihardlimit && + if (dquot->dq_dqb.dqb_ihardlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && !ignore_hardlimit(dquot)) { *warntype = IHARDWARN; return NO_QUOTA; } - if (dquot->dq_isoftlimit && - (dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit && - dquot->dq_itime && CURRENT_TIME >= dquot->dq_itime && + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime && CURRENT_TIME >= dquot->dq_dqb.dqb_itime && !ignore_hardlimit(dquot)) { *warntype = ISOFTLONGWARN; return NO_QUOTA; } - if (dquot->dq_isoftlimit && - (dquot->dq_curinodes + inodes) > dquot->dq_isoftlimit && - dquot->dq_itime == 0) { + if (dquot->dq_dqb.dqb_isoftlimit && + (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && + dquot->dq_dqb.dqb_itime == 0) { *warntype = ISOFTWARN; - dquot->dq_itime = CURRENT_TIME + dquot->dq_sb->s_dquot.inode_expire[dquot->dq_type]; + dquot->dq_dqb.dqb_itime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; } return QUOTA_OK; } -static int check_bdq(struct dquot *dquot, ulong blocks, char prealloc, char *warntype) +static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) { *warntype = 0; - if (blocks <= 0 || dquot->dq_flags & DQ_FAKE) + if (space <= 0 || dquot->dq_flags & DQ_FAKE) return QUOTA_OK; - if (dquot->dq_bhardlimit && - (dquot->dq_curblocks + blocks) > dquot->dq_bhardlimit && + if (dquot->dq_dqb.dqb_bhardlimit && + toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = BHARDWARN; return NO_QUOTA; } - if (dquot->dq_bsoftlimit && - (dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit && - dquot->dq_btime && CURRENT_TIME >= dquot->dq_btime && + if (dquot->dq_dqb.dqb_bsoftlimit && + 
toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime && CURRENT_TIME >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { if (!prealloc) *warntype = BSOFTLONGWARN; return NO_QUOTA; } - if (dquot->dq_bsoftlimit && - (dquot->dq_curblocks + blocks) > dquot->dq_bsoftlimit && - dquot->dq_btime == 0) { + if (dquot->dq_dqb.dqb_bsoftlimit && + toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit && + dquot->dq_dqb.dqb_btime == 0) { if (!prealloc) { *warntype = BSOFTWARN; - dquot->dq_btime = CURRENT_TIME + dquot->dq_sb->s_dquot.block_expire[dquot->dq_type]; + dquot->dq_dqb.dqb_btime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; } else /* @@ -815,148 +942,15 @@ } /* - * Initialize a dquot-struct with new quota info. This is used by the - * system call interface functions. - */ -static int set_dqblk(struct super_block *sb, int id, short type, int flags, struct dqblk *dqblk) -{ - struct dquot *dquot; - int error = -EFAULT; - struct dqblk dq_dqblk; - - if (copy_from_user(&dq_dqblk, dqblk, sizeof(struct dqblk))) - return error; - - if (sb && (dquot = dqget(sb, id, type)) != NODQUOT) { - /* We can't block while changing quota structure... */ - if (id > 0 && ((flags & SET_QUOTA) || (flags & SET_QLIMIT))) { - dquot->dq_bhardlimit = dq_dqblk.dqb_bhardlimit; - dquot->dq_bsoftlimit = dq_dqblk.dqb_bsoftlimit; - dquot->dq_ihardlimit = dq_dqblk.dqb_ihardlimit; - dquot->dq_isoftlimit = dq_dqblk.dqb_isoftlimit; - } - - if ((flags & SET_QUOTA) || (flags & SET_USE)) { - if (dquot->dq_isoftlimit && - dquot->dq_curinodes < dquot->dq_isoftlimit && - dq_dqblk.dqb_curinodes >= dquot->dq_isoftlimit) - dquot->dq_itime = CURRENT_TIME + dquot->dq_sb->s_dquot.inode_expire[type]; - dquot->dq_curinodes = dq_dqblk.dqb_curinodes; - if (dquot->dq_curinodes < dquot->dq_isoftlimit) - dquot->dq_flags &= ~DQ_INODES; - if (dquot->dq_bsoftlimit && - dquot->dq_curblocks < dquot->dq_bsoftlimit && - dq_dqblk.dqb_curblocks >= dquot->dq_bsoftlimit) - dquot->dq_btime = CURRENT_TIME + dquot->dq_sb->s_dquot.block_expire[type]; - dquot->dq_curblocks = dq_dqblk.dqb_curblocks; - if (dquot->dq_curblocks < dquot->dq_bsoftlimit) - dquot->dq_flags &= ~DQ_BLKS; - } - - if (id == 0) { - dquot->dq_sb->s_dquot.block_expire[type] = dquot->dq_btime = dq_dqblk.dqb_btime; - dquot->dq_sb->s_dquot.inode_expire[type] = dquot->dq_itime = dq_dqblk.dqb_itime; - } - - if (dq_dqblk.dqb_bhardlimit == 0 && dq_dqblk.dqb_bsoftlimit == 0 && - dq_dqblk.dqb_ihardlimit == 0 && dq_dqblk.dqb_isoftlimit == 0) - dquot->dq_flags |= DQ_FAKE; - else - dquot->dq_flags &= ~DQ_FAKE; - - dquot->dq_flags |= DQ_MOD; - dqput(dquot); - } - return 0; -} - -static int get_quota(struct super_block *sb, int id, short type, struct dqblk *dqblk) -{ - struct dquot *dquot; - struct dqblk data; - int error = -ESRCH; - - if (!sb || !sb_has_quota_enabled(sb, type)) - goto out; - dquot = dqget(sb, id, type); - if (dquot == NODQUOT) - goto out; - - memcpy(&data, &dquot->dq_dqb, sizeof(struct dqblk)); /* We copy data to preserve them from changing */ - dqput(dquot); - error = -EFAULT; - if (dqblk && !copy_to_user(dqblk, &data, sizeof(struct dqblk))) - error = 0; -out: - return error; -} - -static int get_stats(caddr_t addr) -{ - int error = -EFAULT; - struct dqstats stats; - - dqstats.allocated_dquots = nr_dquots; - dqstats.free_dquots = nr_free_dquots; - - /* make a copy, in case we page-fault in user space */ - memcpy(&stats, &dqstats, sizeof(struct dqstats)); - if (!copy_to_user(addr, &stats, 
sizeof(struct dqstats))) - error = 0; - return error; -} - -static int quota_root_squash(struct super_block *sb, short type, int *addr) -{ - int new_value, error; - - if (!sb) - return(-ENODEV); - - error = -EFAULT; - if (!copy_from_user(&new_value, addr, sizeof(int))) { - sb_dqopt(sb)->rsquash[type] = new_value; - error = 0; - } - return error; -} - -#if 0 /* We are not going to support filesystems without i_blocks... */ -/* - * This is a simple algorithm that calculates the size of a file in blocks. - * This is only used on filesystems that do not have an i_blocks count. - */ -static u_long isize_to_blocks(loff_t isize, size_t blksize_bits) -{ - u_long blocks; - u_long indirect; - - if (!blksize_bits) - blksize_bits = BLOCK_SIZE_BITS; - blocks = (isize >> blksize_bits) + ((isize & ~((1 << blksize_bits)-1)) ? 1 : 0); - if (blocks > 10) { - indirect = ((blocks - 11) >> 8) + 1; /* single indirect blocks */ - if (blocks > (10 + 256)) { - indirect += ((blocks - 267) >> 16) + 1; /* double indirect blocks */ - if (blocks > (10 + 256 + (256 << 8))) - indirect++; /* triple indirect blocks */ - } - blocks += indirect; - } - return blocks; -} -#endif - -/* * Externally referenced functions through dquot_operations in inode. * * Note: this is a blocking operation. */ -void dquot_initialize(struct inode *inode, short type) +void dquot_initialize(struct inode *inode, int type) { struct dquot *dquot[MAXQUOTAS]; unsigned int id = 0; - short cnt; + int cnt; if (IS_NOQUOTA(inode)) return; @@ -1002,7 +996,7 @@ void dquot_drop(struct inode *inode) { struct dquot *dquot; - short cnt; + int cnt; inode->i_flags &= ~S_QUOTA; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -1017,7 +1011,7 @@ /* * This operation can block, but only after everything is updated */ -int dquot_alloc_block(struct inode *inode, unsigned long number, char warn) +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) { int cnt, ret = NO_QUOTA; struct dquot *dquot[MAXQUOTAS]; @@ -1038,16 +1032,16 @@ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (dquot[cnt] == NODQUOT) continue; - dquot_incr_blocks(dquot[cnt], number); + dquot_incr_space(dquot[cnt], number); } - inode->i_blocks += number << (BLOCK_SIZE_BITS - 9); + inode_add_bytes(inode, number); /* NOBLOCK End */ ret = QUOTA_OK; warn_put_all: flush_warnings(dquot, warntype); for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (dquot[cnt] != NODQUOT) - dqput(dquot[cnt]); + dqputduplicate(dquot[cnt]); return ret; } @@ -1084,16 +1078,16 @@ flush_warnings(dquot, warntype); for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (dquot[cnt] != NODQUOT) - dqput(dquot[cnt]); + dqputduplicate(dquot[cnt]); return ret; } /* * This is a non-blocking operation. 
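The allocation helpers above now account usage in bytes (qsize_t, via inode_add_bytes and dqb_curspace) and only convert to quota blocks when comparing against the limits, as check_bdq does with toqb(). A small stand-alone illustration of that style of check, assuming 1 KiB quota blocks for the conversion, could look like:

/* Illustrative only: byte-based usage checked against block limits,
 * assuming 1 KiB quota blocks (QUOTABLOCK_BITS = 10). */
#include <stdio.h>

typedef unsigned long long qsize_t;

#define QUOTABLOCK_BITS 10
#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)

static qsize_t toqb(qsize_t bytes)
{
	return (bytes + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;	/* round up */
}

int main(void)
{
	qsize_t curspace = 5 * 1024 + 100;	/* bytes already charged */
	qsize_t request  = 2000;		/* bytes being allocated */
	unsigned long bsoftlimit = 8;		/* limits stay in blocks */

	if (toqb(curspace + request) > bsoftlimit)
		printf("over soft limit\n");
	else
		printf("within soft limit (%llu of %lu blocks)\n",
		       toqb(curspace + request), bsoftlimit);
	return 0;
}

Keeping the running total in bytes avoids losing sub-block allocations that the old block-counted dq_curblocks could not represent.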
*/ -void dquot_free_block(struct inode *inode, unsigned long number) +void dquot_free_space(struct inode *inode, qsize_t number) { - unsigned short cnt; + unsigned int cnt; struct dquot *dquot; /* NOBLOCK Start */ @@ -1101,10 +1095,10 @@ dquot = dqduplicate(inode->i_dquot[cnt]); if (dquot == NODQUOT) continue; - dquot_decr_blocks(dquot, number); - dqput(dquot); + dquot_decr_space(dquot, number); + dqputduplicate(dquot); } - inode->i_blocks -= number << (BLOCK_SIZE_BITS - 9); + inode_sub_bytes(inode, number); /* NOBLOCK End */ } @@ -1113,7 +1107,7 @@ */ void dquot_free_inode(const struct inode *inode, unsigned long number) { - unsigned short cnt; + unsigned int cnt; struct dquot *dquot; /* NOBLOCK Start */ @@ -1122,7 +1116,7 @@ if (dquot == NODQUOT) continue; dquot_decr_inodes(dquot, number); - dqput(dquot); + dqputduplicate(dquot); } /* NOBLOCK End */ } @@ -1134,7 +1128,7 @@ */ int dquot_transfer(struct inode *inode, struct iattr *iattr) { - unsigned long blocks; + qsize_t space; struct dquot *transfer_from[MAXQUOTAS]; struct dquot *transfer_to[MAXQUOTAS]; int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid, @@ -1164,7 +1158,7 @@ } } /* NOBLOCK START: From now on we shouldn't block */ - blocks = (inode->i_blocks >> 1); + space = inode_get_bytes(inode); /* Build the transfer_from list and check the limits */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { /* The second test can fail when quotaoff is in progress... */ @@ -1174,7 +1168,7 @@ if (transfer_from[cnt] == NODQUOT) /* Can happen on quotafiles (quota isn't initialized on them)... */ continue; if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA || - check_bdq(transfer_to[cnt], blocks, 0, warntype+cnt) == NO_QUOTA) + check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA) goto warn_put_all; } @@ -1189,10 +1183,10 @@ continue; dquot_decr_inodes(transfer_from[cnt], 1); - dquot_decr_blocks(transfer_from[cnt], blocks); + dquot_decr_space(transfer_from[cnt], space); dquot_incr_inodes(transfer_to[cnt], 1); - dquot_incr_blocks(transfer_to[cnt], blocks); + dquot_incr_space(transfer_to[cnt], space); if (inode->i_dquot[cnt] == NODQUOT) BUG(); @@ -1208,39 +1202,29 @@ warn_put_all: flush_warnings(transfer_to, warntype); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + /* First we must put duplicate - otherwise we might deadlock */ if (transfer_to[cnt] != NODQUOT) - dqput(transfer_to[cnt]); + dqputduplicate(transfer_to[cnt]); if (transfer_from[cnt] != NODQUOT) dqput(transfer_from[cnt]); } return ret; } -static int __init dquot_init(void) -{ - int i; - - for (i = 0; i < NR_DQHASH; i++) - INIT_LIST_HEAD(dquot_hash + i); - printk(KERN_NOTICE "VFS: Diskquotas version %s initialized\n", __DQUOT_VERSION__); - return 0; -} -__initcall(dquot_init); - /* * Definitions of diskquota operations. 
*/ struct dquot_operations dquot_operations = { - dquot_initialize, /* mandatory */ - dquot_drop, /* mandatory */ - dquot_alloc_block, - dquot_alloc_inode, - dquot_free_block, - dquot_free_inode, - dquot_transfer + initialize: dquot_initialize, /* mandatory */ + drop: dquot_drop, /* mandatory */ + alloc_space: dquot_alloc_space, + alloc_inode: dquot_alloc_inode, + free_space: dquot_free_space, + free_inode: dquot_free_inode, + transfer: dquot_transfer }; -static inline void set_enable_flags(struct quota_mount_options *dqopt, short type) +static inline void set_enable_flags(struct quota_info *dqopt, int type) { switch (type) { case USRQUOTA: @@ -1252,7 +1236,7 @@ } } -static inline void reset_enable_flags(struct quota_mount_options *dqopt, short type) +static inline void reset_enable_flags(struct quota_info *dqopt, int type) { switch (type) { case USRQUOTA: @@ -1265,16 +1249,15 @@ } /* Function in inode.c - remove pointers to dquots in icache */ -extern void remove_dquot_ref(struct super_block *, short); +extern void remove_dquot_ref(struct super_block *, int); /* * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) */ -int quota_off(struct super_block *sb, short type) +int vfs_quota_off(struct super_block *sb, int type) { - struct file *filp; - short cnt; - struct quota_mount_options *dqopt = sb_dqopt(sb); + int cnt; + struct quota_info *dqopt = sb_dqopt(sb); lock_kernel(); if (!sb) @@ -1292,51 +1275,48 @@ /* Note: these are blocking operations */ remove_dquot_ref(sb, cnt); invalidate_dquots(sb, cnt); + if (info_dirty(&dqopt->info[cnt])) + dqopt->ops[cnt]->write_file_info(sb, cnt); + if (dqopt->ops[cnt]->free_file_info) + dqopt->ops[cnt]->free_file_info(sb, cnt); + put_quota_format(dqopt->info[cnt].dqi_format); - filp = dqopt->files[cnt]; + fput(dqopt->files[cnt]); dqopt->files[cnt] = (struct file *)NULL; - dqopt->inode_expire[cnt] = 0; - dqopt->block_expire[cnt] = 0; - fput(filp); - } + dqopt->info[cnt].dqi_flags = 0; + dqopt->info[cnt].dqi_igrace = 0; + dqopt->info[cnt].dqi_bgrace = 0; + dqopt->ops[cnt] = NULL; + } up(&dqopt->dqoff_sem); out: unlock_kernel(); return 0; } -static inline int check_quotafile_size(loff_t size) -{ - ulong blocks = size >> BLOCK_SIZE_BITS; - size_t off = size & (BLOCK_SIZE - 1); - - return !(((blocks % sizeof(struct dqblk)) * BLOCK_SIZE + off % sizeof(struct dqblk)) % sizeof(struct dqblk)); -} - -static int quota_on(struct super_block *sb, short type, char *path) +int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path) { - struct file *f; + struct file *f = NULL; struct inode *inode; - struct dquot *dquot; - struct quota_mount_options *dqopt = sb_dqopt(sb); - char *tmp; + struct quota_info *dqopt = sb_dqopt(sb); + struct quota_format_type *fmt = find_quota_format(format_id); int error; - if (is_enabled(dqopt, type)) - return -EBUSY; + if (!fmt) + return -ESRCH; + if (is_enabled(dqopt, type)) { + error = -EBUSY; + goto out_fmt; + } down(&dqopt->dqoff_sem); - tmp = getname(path); - error = PTR_ERR(tmp); - if (IS_ERR(tmp)) - goto out_lock; - f = filp_open(tmp, O_RDWR, 0600); - putname(tmp); + f = filp_open(path, O_RDWR, 0600); error = PTR_ERR(f); if (IS_ERR(f)) goto out_lock; + dqopt->files[type] = f; error = -EIO; if (!f->f_op || !f->f_op->read || !f->f_op->write) goto out_f; @@ -1345,134 +1325,197 @@ if (!S_ISREG(inode->i_mode)) goto out_f; error = -EINVAL; - if (inode->i_size == 0 || !check_quotafile_size(inode->i_size)) + if (!fmt->qf_ops->check_quota_file(sb, type)) goto out_f; /* We don't want quota on quota 
files */ dquot_drop(inode); inode->i_flags |= S_NOQUOTA; - dqopt->files[type] = f; - sb->dq_op = &dquot_operations; + dqopt->ops[type] = fmt->qf_ops; + dqopt->info[type].dqi_format = fmt; + if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) + goto out_f; set_enable_flags(dqopt, type); - dquot = dqget(sb, 0, type); - dqopt->inode_expire[type] = (dquot != NODQUOT) ? dquot->dq_itime : MAX_IQ_TIME; - dqopt->block_expire[type] = (dquot != NODQUOT) ? dquot->dq_btime : MAX_DQ_TIME; - dqput(dquot); - add_dquot_ref(sb, type); up(&dqopt->dqoff_sem); return 0; out_f: - filp_close(f, NULL); + if (f) + filp_close(f, NULL); + dqopt->files[type] = NULL; out_lock: up(&dqopt->dqoff_sem); +out_fmt: + put_quota_format(fmt); return error; } -/* - * This is the system call interface. This communicates with - * the user-level programs. Currently this only supports diskquota - * calls. Maybe we need to add the process quotas etc. in the future, - * but we probably should use rlimits for that. - */ -asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr) +/* Generic routine for getting common part of quota structure */ +static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) { - int cmds = 0, type = 0, flags = 0; - kdev_t dev; - struct super_block *sb = NULL; - int ret = -EINVAL; + struct mem_dqblk *dm = &dquot->dq_dqb; - lock_kernel(); - cmds = cmd >> SUBCMDSHIFT; - type = cmd & SUBCMDMASK; + di->dqb_bhardlimit = dm->dqb_bhardlimit; + di->dqb_bsoftlimit = dm->dqb_bsoftlimit; + di->dqb_curspace = dm->dqb_curspace; + di->dqb_ihardlimit = dm->dqb_ihardlimit; + di->dqb_isoftlimit = dm->dqb_isoftlimit; + di->dqb_curinodes = dm->dqb_curinodes; + di->dqb_btime = dm->dqb_btime; + di->dqb_itime = dm->dqb_itime; + di->dqb_valid = QIF_ALL; +} - if ((u_int) type >= MAXQUOTAS) - goto out; - if (id & ~0xFFFF) - goto out; +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot = dqget(sb, id, type); - ret = -EPERM; - switch (cmds) { - case Q_SYNC: - case Q_GETSTATS: - break; - case Q_GETQUOTA: - if (((type == USRQUOTA && current->euid != id) || - (type == GRPQUOTA && !in_egroup_p(id))) && - !capable(CAP_SYS_ADMIN)) - goto out; - break; - default: - if (!capable(CAP_SYS_ADMIN)) - goto out; - } - - ret = -EINVAL; - dev = NODEV; - if (special != NULL || (cmds != Q_SYNC && cmds != Q_GETSTATS)) { - mode_t mode; - struct nameidata nd; - - ret = user_path_walk(special, &nd); - if (ret) - goto out; - - dev = nd.dentry->d_inode->i_rdev; - mode = nd.dentry->d_inode->i_mode; - path_release(&nd); - - ret = -ENOTBLK; - if (!S_ISBLK(mode)) - goto out; - ret = -ENODEV; - sb = get_super(dev); - if (!sb) - goto out; - } - - ret = -EINVAL; - switch (cmds) { - case Q_QUOTAON: - ret = quota_on(sb, type, (char *) addr); - goto out; - case Q_QUOTAOFF: - ret = quota_off(sb, type); - goto out; - case Q_GETQUOTA: - ret = get_quota(sb, id, type, (struct dqblk *) addr); - goto out; - case Q_SETQUOTA: - flags |= SET_QUOTA; - break; - case Q_SETUSE: - flags |= SET_USE; - break; - case Q_SETQLIM: - flags |= SET_QLIMIT; - break; - case Q_SYNC: - ret = sync_dquots(dev, type); - goto out; - case Q_GETSTATS: - ret = get_stats(addr); - goto out; - case Q_RSQUASH: - ret = quota_root_squash(sb, type, (int *) addr); - goto out; - default: - goto out; - } - - ret = -NODEV; - if (sb && sb_has_quota_enabled(sb, type)) - ret = set_dqblk(sb, id, type, flags, (struct dqblk *) addr); -out: - if (sb) - drop_super(sb); - unlock_kernel(); - return ret; + if (!dquot) + return 
-EINVAL; + do_get_dqblk(dquot, di); + dqput(dquot); + return 0; +} + +/* Generic routine for setting common part of quota structure */ +static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) +{ + struct mem_dqblk *dm = &dquot->dq_dqb; + int check_blim = 0, check_ilim = 0; + + if (di->dqb_valid & QIF_SPACE) { + dm->dqb_curspace = di->dqb_curspace; + check_blim = 1; + } + if (di->dqb_valid & QIF_BLIMITS) { + dm->dqb_bsoftlimit = di->dqb_bsoftlimit; + dm->dqb_bhardlimit = di->dqb_bhardlimit; + check_blim = 1; + } + if (di->dqb_valid & QIF_INODES) { + dm->dqb_curinodes = di->dqb_curinodes; + check_ilim = 1; + } + if (di->dqb_valid & QIF_ILIMITS) { + dm->dqb_isoftlimit = di->dqb_isoftlimit; + dm->dqb_ihardlimit = di->dqb_ihardlimit; + check_ilim = 1; + } + if (di->dqb_valid & QIF_BTIME) + dm->dqb_btime = di->dqb_btime; + if (di->dqb_valid & QIF_ITIME) + dm->dqb_itime = di->dqb_itime; + + if (check_blim) { + if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) { + dm->dqb_btime = 0; + dquot->dq_flags &= ~DQ_BLKS; + } + else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_btime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; + } + if (check_ilim) { + if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { + dm->dqb_itime = 0; + dquot->dq_flags &= ~DQ_INODES; + } + else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ + dm->dqb_itime = CURRENT_TIME + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; + } + if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) + dquot->dq_flags &= ~DQ_FAKE; + else + dquot->dq_flags |= DQ_FAKE; + dquot->dq_flags |= DQ_MOD; +} + +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) +{ + struct dquot *dquot = dqget(sb, id, type); + + if (!dquot) + return -EINVAL; + do_set_dqblk(dquot, di); + dqput(dquot); + return 0; +} + +/* Generic routine for getting common part of quota file information */ +int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi = sb_dqopt(sb)->info + type; + + ii->dqi_bgrace = mi->dqi_bgrace; + ii->dqi_igrace = mi->dqi_igrace; + ii->dqi_flags = mi->dqi_flags & DQF_MASK; + ii->dqi_valid = IIF_ALL; + return 0; +} + +/* Generic routine for setting common part of quota file information */ +int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +{ + struct mem_dqinfo *mi = sb_dqopt(sb)->info + type; + + if (ii->dqi_valid & IIF_BGRACE) + mi->dqi_bgrace = ii->dqi_bgrace; + if (ii->dqi_valid & IIF_IGRACE) + mi->dqi_igrace = ii->dqi_igrace; + if (ii->dqi_valid & IIF_FLAGS) + mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); + mark_info_dirty(mi); + return 0; +} + +struct quotactl_ops vfs_quotactl_ops = { + quota_on: vfs_quota_on, + quota_off: vfs_quota_off, + quota_sync: vfs_quota_sync, + get_info: vfs_get_dqinfo, + set_info: vfs_set_dqinfo, + get_dqblk: vfs_get_dqblk, + set_dqblk: vfs_set_dqblk +}; + +static ctl_table fs_dqstats_table[] = { + {FS_DQ_LOOKUPS, "lookups", &dqstats.lookups, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_DROPS, "drops", &dqstats.drops, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_READS, "reads", &dqstats.reads, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_WRITES, "writes", &dqstats.writes, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_CACHE_HITS, "cache_hits", &dqstats.cache_hits, 
sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_ALLOCATED, "allocated_dquots", &dqstats.allocated_dquots, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_FREE, "free_dquots", &dqstats.free_dquots, sizeof(int), 0444, NULL, &proc_dointvec}, + {FS_DQ_SYNCS, "syncs", &dqstats.syncs, sizeof(int), 0444, NULL, &proc_dointvec}, + {}, +}; + +static ctl_table fs_table[] = { + {FS_DQSTATS, "quota", NULL, 0, 0555, fs_dqstats_table}, + {}, +}; + +static ctl_table sys_table[] = { + {CTL_FS, "fs", NULL, 0, 0555, fs_table}, + {}, +}; + +static int __init dquot_init(void) +{ + int i; + + register_sysctl_table(sys_table, 0); + for (i = 0; i < NR_DQHASH; i++) + INIT_LIST_HEAD(dquot_hash + i); + printk(KERN_NOTICE "VFS: Disk quotas v%s\n", __DQUOT_VERSION__); + + return 0; } +__initcall(dquot_init); + +EXPORT_SYMBOL(register_quota_format); +EXPORT_SYMBOL(unregister_quota_format); +EXPORT_SYMBOL(dqstats); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/exec.c linux.21pre4-ac2/fs/exec.c --- linux.21pre4/fs/exec.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/fs/exec.c 2003-01-06 15:38:08.000000000 +0000 @@ -333,6 +333,12 @@ if (!mpnt) return -ENOMEM; + if (!vm_enough_memory((STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) + { + kmem_cache_free(vm_area_cachep, mpnt); + return -ENOMEM; + } + down_write(¤t->mm->mmap_sem); { mpnt->vm_mm = current->mm; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/ext2/balloc.c linux.21pre4-ac2/fs/ext2/balloc.c --- linux.21pre4/fs/ext2/balloc.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/ext2/balloc.c 2003-01-06 17:30:06.000000000 +0000 @@ -269,7 +269,8 @@ } lock_super (sb); es = sb->u.ext2_sb.s_es; - if (block < le32_to_cpu(es->s_first_data_block) || + if (block < le32_to_cpu(es->s_first_data_block) || + block + count < block || (block + count) > le32_to_cpu(es->s_blocks_count)) { ext2_error (sb, "ext2_free_blocks", "Freeing blocks not in datazone - " @@ -302,22 +303,20 @@ if (!gdp) goto error_return; - if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) || - in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) || - in_range (block, le32_to_cpu(gdp->bg_inode_table), - sb->u.ext2_sb.s_itb_per_group) || - in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table), - sb->u.ext2_sb.s_itb_per_group)) - ext2_error (sb, "ext2_free_blocks", - "Freeing blocks in system zones - " - "Block = %lu, count = %lu", - block, count); + for (i = 0; i < count; i++, block++) { + if (block == le32_to_cpu(gdp->bg_block_bitmap) || + block == le32_to_cpu(gdp->bg_inode_bitmap) || + in_range(block, le32_to_cpu(gdp->bg_inode_table), + EXT2_SB(sb)->s_itb_per_group)) { + ext2_error(sb, __FUNCTION__, + "Freeing block in system zone - block = %lu", + block); + continue; + } - for (i = 0; i < count; i++) { if (!ext2_clear_bit (bit + i, bh->b_data)) - ext2_error (sb, "ext2_free_blocks", - "bit already cleared for block %lu", - block + i); + ext2_error(sb, __FUNCTION__, + "bit already cleared for block %lu", block); else { DQUOT_FREE_BLOCK(inode, 1); gdp->bg_free_blocks_count = @@ -336,7 +335,6 @@ wait_on_buffer (bh); } if (overflow) { - block += count; count = overflow; goto do_more; } @@ -521,9 +519,14 @@ tmp == le32_to_cpu(gdp->bg_inode_bitmap) || in_range (tmp, le32_to_cpu(gdp->bg_inode_table), sb->u.ext2_sb.s_itb_per_group)) + { ext2_error (sb, "ext2_new_block", - "Allocating block in system zone - " - "block = %u", tmp); + "Allocating block in system zone - block = %u", + tmp); 
+ ext2_set_bit(j, bh->b_data); + DQUOT_FREE_BLOCK(inode, 1); + goto repeat; + } if (ext2_set_bit (j, bh->b_data)) { ext2_warning (sb, "ext2_new_block", diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/ext2/inode.c linux.21pre4-ac2/fs/ext2/inode.c --- linux.21pre4/fs/ext2/inode.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/ext2/inode.c 2003-01-06 22:06:51.000000000 +0000 @@ -592,8 +592,9 @@ { return generic_block_bmap(mapping,block,ext2_get_block); } -static int ext2_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) +static int ext2_direct_IO(int rw, struct file * filp, struct kiobuf * iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, ext2_get_block); } struct address_space_operations ext2_aops = { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/ext3/inode.c linux.21pre4-ac2/fs/ext3/inode.c --- linux.21pre4/fs/ext3/inode.c 2003-01-29 17:07:46.000000000 +0000 +++ linux.21pre4-ac2/fs/ext3/inode.c 2003-02-04 15:05:53.000000000 +0000 @@ -87,6 +87,34 @@ return err; } +/* + * Work out how many blocks we need to progress with the next chunk of a + * truncate transaction. + */ + +static unsigned long blocks_for_truncate(struct inode *inode) +{ + unsigned long needed; + + needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); + + /* Give ourselves just enough room to cope with inodes in which + * i_blocks is corrupt: we've seen disk corruptions in the past + * which resulted in random data in an inode which looked enough + * like a regular file for ext3 to try to delete it. Things + * will go a bit crazy if that happens, but at least we should + * try not to panic the whole kernel. */ + if (needed < 2) + needed = 2; + + /* But we need to bound the transaction so we don't overflow the + * journal. */ + if (needed > EXT3_MAX_TRANS_DATA) + needed = EXT3_MAX_TRANS_DATA; + + return EXT3_DATA_TRANS_BLOCKS + needed; +} + /* * Truncate transactions can be complex and absolutely huge. 
So we need to * be able to restart the transaction at a conventient checkpoint to make @@ -100,14 +128,9 @@ static handle_t *start_transaction(struct inode *inode) { - long needed; handle_t *result; - needed = inode->i_blocks; - if (needed > EXT3_MAX_TRANS_DATA) - needed = EXT3_MAX_TRANS_DATA; - - result = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS + needed); + result = ext3_journal_start(inode, blocks_for_truncate(inode)); if (!IS_ERR(result)) return result; @@ -123,14 +146,9 @@ */ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) { - long needed; - if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS) return 0; - needed = inode->i_blocks; - if (needed > EXT3_MAX_TRANS_DATA) - needed = EXT3_MAX_TRANS_DATA; - if (!ext3_journal_extend(handle, EXT3_RESERVE_TRANS_BLOCKS + needed)) + if (!ext3_journal_extend(handle, blocks_for_truncate(inode))) return 0; return 1; } @@ -142,11 +160,8 @@ */ static int ext3_journal_test_restart(handle_t *handle, struct inode *inode) { - long needed = inode->i_blocks; - if (needed > EXT3_MAX_TRANS_DATA) - needed = EXT3_MAX_TRANS_DATA; jbd_debug(2, "restarting handle %p\n", handle); - return ext3_journal_restart(handle, EXT3_DATA_TRANS_BLOCKS + needed); + return ext3_journal_restart(handle, blocks_for_truncate(inode)); } /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/inode.c linux.21pre4-ac2/fs/inode.c --- linux.21pre4/fs/inode.c 2003-01-29 17:07:57.000000000 +0000 +++ linux.21pre4-ac2/fs/inode.c 2003-01-06 23:22:12.000000000 +0000 @@ -1201,9 +1201,9 @@ /* Functions back in dquot.c */ void put_dquot_list(struct list_head *); -int remove_inode_dquot_ref(struct inode *, short, struct list_head *); +int remove_inode_dquot_ref(struct inode *, int, struct list_head *); -void remove_dquot_ref(struct super_block *sb, short type) +void remove_dquot_ref(struct super_block *sb, int type) { struct inode *inode; struct list_head *act_head; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/ioctl.c linux.21pre4-ac2/fs/ioctl.c --- linux.21pre4/fs/ioctl.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/ioctl.c 2003-01-06 23:22:17.000000000 +0000 @@ -101,6 +101,16 @@ filp->f_flags &= ~FASYNC; break; + case FIOQSIZE: + if (S_ISDIR(filp->f_dentry->d_inode->i_mode) || + S_ISREG(filp->f_dentry->d_inode->i_mode) || + S_ISLNK(filp->f_dentry->d_inode->i_mode)) { + loff_t res = inode_get_bytes(filp->f_dentry->d_inode); + error = copy_to_user((loff_t *)arg, &res, sizeof(res)) ? -EFAULT : 0; + } + else + error = -ENOTTY; + break; default: error = -ENOTTY; if (S_ISREG(filp->f_dentry->d_inode->i_mode)) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/jbd/journal.c linux.21pre4-ac2/fs/jbd/journal.c --- linux.21pre4/fs/jbd/journal.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/jbd/journal.c 2003-01-07 00:38:20.000000000 +0000 @@ -1803,9 +1803,9 @@ if (buffer_jbd(bh)) { /* Someone did it for us! 
*/ - J_ASSERT_BH(bh, bh->b_private != NULL); + J_ASSERT_BH(bh, bh->b_journal_head != NULL); journal_free_journal_head(jh); - jh = bh->b_private; + jh = bh->b_journal_head; } else { /* * We actually don't need jh_splice_lock when @@ -1813,7 +1813,7 @@ */ spin_lock(&jh_splice_lock); set_bit(BH_JBD, &bh->b_state); - bh->b_private = jh; + bh->b_journal_head = jh; jh->b_bh = bh; atomic_inc(&bh->b_count); spin_unlock(&jh_splice_lock); @@ -1822,7 +1822,7 @@ } jh->b_jcount++; spin_unlock(&journal_datalist_lock); - return bh->b_private; + return bh->b_journal_head; } /* @@ -1855,7 +1855,7 @@ J_ASSERT_BH(bh, jh2bh(jh) == bh); BUFFER_TRACE(bh, "remove journal_head"); spin_lock(&jh_splice_lock); - bh->b_private = NULL; + bh->b_journal_head = NULL; jh->b_bh = NULL; /* debug, really */ clear_bit(BH_JBD, &bh->b_state); __brelse(bh); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/jbd/transaction.c linux.21pre4-ac2/fs/jbd/transaction.c --- linux.21pre4/fs/jbd/transaction.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/jbd/transaction.c 2003-01-29 17:23:25.000000000 +0000 @@ -1440,6 +1440,7 @@ if (handle->h_sync) { do { old_handle_count = transaction->t_handle_count; + set_current_state(TASK_RUNNING); yield(); } while (old_handle_count != transaction->t_handle_count); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/jffs2/background.c linux.21pre4-ac2/fs/jffs2/background.c --- linux.21pre4/fs/jffs2/background.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/fs/jffs2/background.c 2003-01-06 19:15:41.000000000 +0000 @@ -106,9 +106,6 @@ sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index); - /* FIXME in the 2.2 backport */ - current->nice = 10; - for (;;) { spin_lock_irq(¤t->sigmask_lock); siginitsetinv (¤t->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT)); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/jfs/inode.c linux.21pre4-ac2/fs/jfs/inode.c --- linux.21pre4/fs/jfs/inode.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/jfs/inode.c 2003-01-29 17:23:25.000000000 +0000 @@ -328,9 +328,10 @@ return generic_block_bmap(mapping, block, jfs_get_block); } -static int jfs_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf, +static int jfs_direct_IO(int rw, struct file *filp, struct kiobuf *iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, jfs_get_block); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/lockd/svclock.c linux.21pre4-ac2/fs/lockd/svclock.c --- linux.21pre4/fs/lockd/svclock.c 2003-01-29 16:26:31.000000000 +0000 +++ linux.21pre4-ac2/fs/lockd/svclock.c 2003-01-30 16:47:13.000000000 +0000 @@ -62,8 +62,8 @@ nlmsvc_remove_block(block); bp = &nlm_blocked; if (when != NLM_NEVER) { - if ((when += jiffies) == NLM_NEVER) - when ++; + if ((when += jiffies) > NLM_NEVER) + when = NLM_NEVER; while ((b = *bp) && time_before_eq(b->b_when,when)) bp = &b->b_next; } else @@ -176,8 +176,14 @@ struct nlm_rqst *call; /* Create host handle for callback */ + /* We must up the semaphore in case the host lookup does + * garbage collection (which calls nlmsvc_traverse_blocks), + * but this shouldn't be a problem because nlmsvc_lock has + * to retry the lock after this anyway */ + up(&file->f_sema); host = nlmclnt_lookup_host(&rqstp->rq_addr, rqstp->rq_prot, rqstp->rq_vers); + down(&file->f_sema); 
if (host == NULL) return NULL; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/lockd/svcproc.c linux.21pre4-ac2/fs/lockd/svcproc.c --- linux.21pre4/fs/lockd/svcproc.c 2003-01-29 16:26:31.000000000 +0000 +++ linux.21pre4-ac2/fs/lockd/svcproc.c 2003-01-06 17:30:20.000000000 +0000 @@ -345,6 +345,15 @@ return stat; } +static int +nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, + void *resp) +{ + dprintk("lockd: GRANTED_RES called\n"); + nlmsvc_grant_reply(&argp->cookie, argp->status); + return 0; +} + /* * SHARE: create a DOS share or alter existing share. */ @@ -545,14 +554,12 @@ #define nlmsvc_decode_lockres nlmsvc_decode_void #define nlmsvc_decode_unlockres nlmsvc_decode_void #define nlmsvc_decode_cancelres nlmsvc_decode_void -#define nlmsvc_decode_grantedres nlmsvc_decode_void #define nlmsvc_proc_none nlmsvc_proc_null #define nlmsvc_proc_test_res nlmsvc_proc_null #define nlmsvc_proc_lock_res nlmsvc_proc_null #define nlmsvc_proc_cancel_res nlmsvc_proc_null #define nlmsvc_proc_unlock_res nlmsvc_proc_null -#define nlmsvc_proc_granted_res nlmsvc_proc_null struct nlm_void { int dummy; }; @@ -589,7 +596,7 @@ PROC(lock_res, lockres, norep, res, void, 1), PROC(cancel_res, cancelres, norep, res, void, 1), PROC(unlock_res, unlockres, norep, res, void, 1), - PROC(granted_res, grantedres, norep, res, void, 1), + PROC(granted_res, res, norep, res, void, 1), /* statd callback */ PROC(sm_notify, reboot, void, reboot, void, 1), PROC(none, void, void, void, void, 1), diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/locks.c linux.21pre4-ac2/fs/locks.c --- linux.21pre4/fs/locks.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/locks.c 2003-01-06 17:30:20.000000000 +0000 @@ -935,8 +935,11 @@ goto next_lock; /* If the next lock in the list has entirely bigger * addresses than the new one, insert the lock here. 
+ * + * be careful if fl_end == OFFSET_MAX --okir */ - if (fl->fl_start > caller->fl_end + 1) + if (fl->fl_start > caller->fl_end + 1 + && caller->fl_end != OFFSET_MAX) break; /* If we come here, the new and old lock are of the diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/Makefile linux.21pre4-ac2/fs/Makefile --- linux.21pre4/fs/Makefile 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/Makefile 2003-01-06 23:22:49.000000000 +0000 @@ -7,20 +7,19 @@ O_TARGET := fs.o -export-objs := filesystems.o open.o dcache.o buffer.o +export-objs := filesystems.o open.o dcache.o buffer.o dquot.o mod-subdirs := nls obj-y := open.o read_write.o devices.o file_table.o buffer.o \ super.o block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \ fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \ dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \ - filesystems.o namespace.o seq_file.o xattr.o + filesystems.o namespace.o seq_file.o quota.o xattr.o -ifeq ($(CONFIG_QUOTA),y) -obj-y += dquot.o -else -obj-y += noquot.o -endif + +obj-$(CONFIG_QUOTA) += dquot.o +obj-$(CONFIG_QFMT_V1) += quota_v1.o +obj-$(CONFIG_QFMT_V2) += quota_v2.o subdir-$(CONFIG_PROC_FS) += proc subdir-y += partitions @@ -71,6 +70,7 @@ obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o +obj-$(CONFIG_BINFMT_SOM) += binfmt_som.o obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/nfs/direct.c linux.21pre4-ac2/fs/nfs/direct.c --- linux.21pre4/fs/nfs/direct.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/fs/nfs/direct.c 2003-01-06 22:07:26.000000000 +0000 @@ -0,0 +1,374 @@ +/* + * linux/fs/nfs/direct.c + * + * High-performance direct I/O for the NFS client + * + * When an application requests uncached I/O, all read and write requests + * are made directly to the server; data stored or fetched via these + * requests is not cached in the Linux page cache. The client does not + * correct unaligned requests from applications. All requested bytes are + * held on permanent storage before a direct write system call returns to + * an application. Applications that manage their own data caching, such + * as databases, make very good use of direct I/O on local file systems. + * + * Solaris implements an uncached I/O facility called directio() that + * is used for backups and sequential I/O to very large files. Solaris + * also supports uncaching whole NFS partitions with "-o forcedirectio," + * an undocumented mount option. + * + * Note that I/O to read in executables (e.g. kernel_read) cannot use + * direct (kiobuf) reads because there is no vma backing the passed-in + * data buffer. + * + * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust. + * + * Initial implementation: 12/2001 by Chuck Lever + * + * TODO: + * + * 1. Use concurrent asynchronous network requests rather than + * serialized synchronous network requests for normal (non-sync) + * direct I/O. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define NFSDBG_FACILITY (NFSDBG_PAGECACHE | NFSDBG_VFS) +#define VERF_SIZE (2 * sizeof(__u32)) + +static /* inline */ int +nfs_direct_read_rpc(struct file *file, struct nfs_readargs *arg) +{ + int result; + struct inode * inode = file->f_dentry->d_inode; + struct nfs_fattr fattr; + struct rpc_message msg; + struct nfs_readres res = { &fattr, arg->count, 0 }; + +#ifdef CONFIG_NFS_V3 + msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ? + NFS3PROC_READ : NFSPROC_READ; +#else + msg.rpc_proc = NFSPROC_READ; +#endif + msg.rpc_argp = arg; + msg.rpc_resp = &res; + + lock_kernel(); + msg.rpc_cred = nfs_file_cred(file); + fattr.valid = 0; + result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + nfs_refresh_inode(inode, &fattr); + unlock_kernel(); + + return result; +} + +static /* inline */ int +nfs_direct_write_rpc(struct file *file, struct nfs_writeargs *arg, + struct nfs_writeverf *verf) +{ + int result; + struct inode *inode = file->f_dentry->d_inode; + struct nfs_fattr fattr; + struct rpc_message msg; + struct nfs_writeres res = { &fattr, verf, 0 }; + +#ifdef CONFIG_NFS_V3 + msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ? + NFS3PROC_WRITE : NFSPROC_WRITE; +#else + msg.rpc_proc = NFSPROC_WRITE; +#endif + msg.rpc_argp = arg; + msg.rpc_resp = &res; + + lock_kernel(); + msg.rpc_cred = get_rpccred(nfs_file_cred(file)); + fattr.valid = 0; + result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + nfs_write_attributes(inode, &fattr); + put_rpccred(msg.rpc_cred); + unlock_kernel(); + +#ifdef CONFIG_NFS_V3 + if (NFS_PROTO(inode)->version == 3) { + if (result > 0) { + if ((arg->stable == NFS_FILE_SYNC) && + (verf->committed != NFS_FILE_SYNC)) { + printk(KERN_ERR __FUNCTION__ + ": server didn't sync stable write request\n"); + return -EIO; + } + + if (result != arg->count) { + printk(KERN_INFO __FUNCTION__ + ": short write, count=%u, result=%d\n", + arg->count, result); + } + } + return result; + } else { +#endif + verf->committed = NFS_FILE_SYNC; /* NFSv2 always syncs data */ + if (result == 0) + return arg->count; + return result; +#ifdef CONFIG_NFS_V3 + } +#endif +} + +#ifdef CONFIG_NFS_V3 +static /* inline */ int +nfs_direct_commit_rpc(struct inode *inode, loff_t offset, size_t count, + struct nfs_writeverf *verf) +{ + int result; + struct nfs_fattr fattr; + struct nfs_writeargs arg = { NFS_FH(inode), offset, count, 0, 0, + NULL }; + struct nfs_writeres res = { &fattr, verf, 0 }; + struct rpc_message msg = { NFS3PROC_COMMIT, &arg, &res, NULL }; + + fattr.valid = 0; + + lock_kernel(); + result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); + nfs_write_attributes(inode, &fattr); + unlock_kernel(); + + return result; +} +#else +static inline int +nfs_direct_commit_rpc(struct inode *inode, loff_t offset, size_t count, + struct nfs_writeverf *verf) +{ + return 0; +} +#endif + +/* + * Walk through the iobuf and create an iovec for each "rsize" bytes. 
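The read and write paths that follow issue one synchronous RPC per rsize- or wsize-sized chunk and advance the file offset after each reply. Stripped of the page bookkeeping, the chunking reduces to a loop of this shape (a stand-alone sketch with a dummy transfer function, not the kernel code):

/* Sketch of the per-chunk loop only; do_chunk() stands in for the RPC. */
#include <stdio.h>
#include <stddef.h>

static size_t do_chunk(unsigned long long offset, size_t len)
{
	printf("request: offset=%llu len=%zu\n", offset, len);
	return len;			/* pretend the full chunk transferred */
}

int main(void)
{
	size_t rsize = 8192;		/* server's preferred transfer size */
	size_t count = 20000;		/* total bytes requested */
	unsigned long long offset = 0;
	size_t total = 0;

	while (count) {
		size_t request = count > rsize ? rsize : count;
		size_t result = do_chunk(offset, request);

		total += result;
		count -= result;
		offset += result;
		if (result < request)	/* short transfer ends the loop */
			break;
	}
	printf("total=%zu\n", total);
	return 0;
}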
+ */ +static int +nfs_direct_read(struct file *file, struct kiobuf *iobuf, loff_t offset, + size_t count) +{ + int curpage, total; + struct inode *inode = file->f_dentry->d_inode; + int rsize = NFS_SERVER(inode)->rsize; + struct page **src = iobuf->maplist, + **end = iobuf->maplist + iobuf->nr_pages; + struct page *pages[NFS_READ_MAXIOV]; + struct nfs_readargs args = { NFS_FH(inode), + offset, + 0, + iobuf->offset, + pages }; + + total = 0; + curpage = 0; + while (count) { + int request, result; + struct page **dst = pages; + + request = count; + if (count > rsize) + request = rsize; + args.count = request; + + do { + if (!*src) + return -EFAULT; + + *dst++ = *src; + /* zero after the first iov */ + if (request < PAGE_SIZE) + break; + request -= PAGE_SIZE; + src++; + } while (request != 0 && src != end); + + result = nfs_direct_read_rpc(file, &args); + + if (result < 0) { + if (result == -EISDIR) + total = -EINVAL; + else + total = result; + break; + } + + total += result; + if (result < args.count) /* NFSv2ism */ + break; + count -= result; + args.offset += result; + args.pgbase += result; + args.pgbase &= ~PAGE_MASK; + }; + return total; +} + +/* + * Walk through the iobuf and create an iovec for each "wsize" bytes. + * If only one network write is necessary, or if the O_SYNC flag or + * 'sync' mount option are present, or if this is a V2 inode, use + * FILE_SYNC. Otherwise, use UNSTABLE and finish with a COMMIT. + * + * The mechanics of this function are much the same as nfs_direct_read, + * with the added complexity of committing unstable writes. + */ +static int +nfs_direct_write(struct file *file, struct kiobuf *iobuf, + loff_t offset, size_t count) +{ + int curpage, total; + int need_commit = 0; + loff_t save_offset = offset; + struct inode *inode = file->f_dentry->d_inode; + int wsize = NFS_SERVER(inode)->wsize; + struct nfs_writeverf first_verf, ret_verf; + struct page *pages[NFS_WRITE_MAXIOV]; + struct nfs_writeargs args = { NFS_FH(inode), 0, 0, NFS_FILE_SYNC, 0, pages }; + +#ifdef CONFIG_NFS_V3 + if ((NFS_PROTO(inode)->version == 3) && (count > wsize) && + (!IS_SYNC(inode))) + args.stable = NFS_UNSTABLE; +#endif + +retry: + total = 0; + curpage = 0; + while (count) { + int request, result; + struct page **dest = pages; + + request = count; + if (count > wsize) + request = wsize; + args.count = request; + args.offset = offset; + args.pgbase = (iobuf->offset + total) & ~PAGE_MASK; + + do { + struct page *page = iobuf->maplist[curpage]; + + if (!page) + return -EFAULT; + + *dest++ = page; + /* zero after the first iov */ + if (request > PAGE_SIZE) + break; + request -= PAGE_SIZE; + curpage++; + } while (request != 0 && curpage < iobuf->nr_pages); + + result = nfs_direct_write_rpc(file, &args, &ret_verf); + + if (result < 0) { + total = result; + break; + } + + if (!total) + memcpy(&first_verf.verifier, &ret_verf.verifier, + VERF_SIZE); + if (ret_verf.committed != NFS_FILE_SYNC) { + need_commit = 1; + if (memcmp(&first_verf.verifier, &ret_verf.verifier, + VERF_SIZE)) + goto print_retry; + } + + total += result; + count -= result; + offset += result; + }; + + /* + * Commit data written so far, even in the event of an error + */ + if (need_commit) { + if (nfs_direct_commit_rpc(inode, save_offset, + iobuf->length - count, &ret_verf)) + goto print_retry; + if (memcmp(&first_verf.verifier, &ret_verf.verifier, + VERF_SIZE)) + goto print_retry; + } + + return total; + +print_retry: + printk(KERN_INFO __FUNCTION__ + ": detected server restart; retrying with FILE_SYNC\n"); + args.stable = 
NFS_FILE_SYNC; + offset = save_offset; + count = iobuf->length; + goto retry; +} + +/* + * Read or write data, moving the data directly to/from the + * application's buffer without caching in the page cache. + * + * Rules for direct I/O + * + * 1. block size = 512 bytes or more + * 2. file byte offset is block aligned + * 3. byte count is a multiple of block size + * 4. user buffer is not aligned + * 5. user buffer is faulted in and pinned + * + * These are verified before we get here. + */ +int +nfs_direct_IO(int rw, struct file *file, struct kiobuf *iobuf, + unsigned long blocknr, int blocksize) +{ + int result = -EINVAL; + size_t count = iobuf->length; + struct dentry *dentry = file->f_dentry; + struct inode *inode = dentry->d_inode; + loff_t offset = blocknr << inode->i_blkbits; + + switch (rw) { + case READ: + dfprintk(VFS, + "NFS: direct_IO(READ) (%s/%s) off/cnt(%Lu/%d)\n", + dentry->d_parent->d_name.name, + dentry->d_name.name, offset, count); + + result = nfs_direct_read(file, iobuf, offset, count); + break; + case WRITE: + dfprintk(VFS, + "NFS: direct_IO(WRITE) (%s/%s) off/cnt(%Lu/%d)\n", + dentry->d_parent->d_name.name, + dentry->d_name.name, offset, count); + + result = nfs_direct_write(file, iobuf, offset, count); + break; + default: + break; + } + + dfprintk(VFS, "NFS: direct_IO result = %d\n", result); + return result; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/nfs/file.c linux.21pre4-ac2/fs/nfs/file.c --- linux.21pre4/fs/nfs/file.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/nfs/file.c 2003-01-06 22:07:26.000000000 +0000 @@ -16,6 +16,7 @@ * nfs regular file handling functions */ +#include #include #include #include @@ -200,6 +201,9 @@ sync_page: nfs_sync_page, writepage: nfs_writepage, prepare_write: nfs_prepare_write, +#ifdef CONFIG_NFS_DIRECTIO + direct_IO: nfs_direct_IO, +#endif commit_write: nfs_commit_write }; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/nfs/Makefile linux.21pre4-ac2/fs/nfs/Makefile --- linux.21pre4/fs/nfs/Makefile 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/nfs/Makefile 2003-01-06 22:07:26.000000000 +0000 @@ -14,6 +14,7 @@ obj-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o obj-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o +obj-$(CONFIG_NFS_DIRECTIO) += direct.o obj-m := $(O_TARGET) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/nfs/nfsroot.c linux.21pre4-ac2/fs/nfs/nfsroot.c --- linux.21pre4/fs/nfs/nfsroot.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/nfs/nfsroot.c 2003-02-04 14:57:46.000000000 +0000 @@ -384,10 +384,11 @@ "number from server, using default\n"); port = nfsd_port; } - nfs_port = htons(port); + nfs_port = port; dprintk("Root-NFS: Portmapper on server returned %d " "as nfsd port\n", port); } + nfs_port = htons(nfs_port); if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) { printk(KERN_ERR "Root-NFS: Unable to get mountd port " diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/nfs/write.c linux.21pre4-ac2/fs/nfs/write.c --- linux.21pre4/fs/nfs/write.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/nfs/write.c 2003-01-06 22:07:26.000000000 +0000 @@ -123,23 +123,6 @@ } /* - * This function will be used to simulate weak cache consistency - * under NFSv2 when the NFSv3 attribute patch is included. - * For the moment, we just call nfs_refresh_inode(). 
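The retry logic in nfs_direct_write() above hinges on the NFSv3 write verifier: every UNSTABLE reply and the final COMMIT must carry the same verifier, otherwise the server rebooted and the data must be resent with FILE_SYNC. A small self-contained sketch of that comparison, assuming the verifier is treated as an opaque 8-byte cookie (not code from the patch):

#include <string.h>

#define VERF_SIZE (2 * sizeof(unsigned int))

/* Returns 1 if every reply carried the same verifier as the first one. */
static int verifiers_stable(const unsigned char replies[][VERF_SIZE], int n)
{
	int i;

	for (i = 1; i < n; i++)
		if (memcmp(replies[0], replies[i], VERF_SIZE))
			return 0;	/* server restarted in between */
	return 1;
}

int main(void)
{
	unsigned char r[3][VERF_SIZE] = { "AAAAAAA", "AAAAAAA", "BBBBBBB" };

	return verifiers_stable(r, 3) ? 0 : 1;	/* exits 1: restart detected */
}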
- */ -static __inline__ int -nfs_write_attributes(struct inode *inode, struct nfs_fattr *fattr) -{ - if ((fattr->valid & NFS_ATTR_FATTR) && !(fattr->valid & NFS_ATTR_WCC)) { - fattr->pre_size = NFS_CACHE_ISIZE(inode); - fattr->pre_mtime = NFS_CACHE_MTIME(inode); - fattr->pre_ctime = NFS_CACHE_CTIME(inode); - fattr->valid |= NFS_ATTR_WCC; - } - return nfs_refresh_inode(inode, fattr); -} - -/* * Write a page synchronously. * Offset is the data offset within the page. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/noquot.c linux.21pre4-ac2/fs/noquot.c --- linux.21pre4/fs/noquot.c 2003-01-29 16:26:31.000000000 +0000 +++ linux.21pre4-ac2/fs/noquot.c 1970-01-01 01:00:00.000000000 +0100 @@ -1,15 +0,0 @@ -/* noquot.c: Quota stubs necessary for when quotas are not - * compiled into the kernel. - */ - -#include -#include -#include - -int nr_dquots, nr_free_dquots; -int max_dquots; - -asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr) -{ - return(-ENOSYS); -} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/proc/array.c linux.21pre4-ac2/fs/proc/array.c --- linux.21pre4/fs/proc/array.c 2003-01-29 16:26:30.000000000 +0000 +++ linux.21pre4-ac2/fs/proc/array.c 2003-01-06 19:16:01.000000000 +0000 @@ -338,16 +338,15 @@ /* scale priority and nice values from timeslices to -20..20 */ /* to make it look like a "normal" Unix priority/nice value */ - priority = task->counter; - priority = 20 - (priority * 10 + DEF_COUNTER / 2) / DEF_COUNTER; - nice = task->nice; + priority = task_prio(task); + nice = task_nice(task); read_lock(&tasklist_lock); ppid = task->pid ? task->p_opptr->pid : 0; read_unlock(&tasklist_lock); res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu %lu \ -%lu %lu %lu %lu %lu %lu %lu %lu %d %d\n", +%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", task->pid, task->comm, state, @@ -390,7 +389,9 @@ task->nswap, task->cnswap, task->exit_signal, - task->processor); + task_cpu(task), + task->rt_priority, + task->policy); if(mm) mmput(mm); return res; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/proc/proc_misc.c linux.21pre4-ac2/fs/proc/proc_misc.c --- linux.21pre4/fs/proc/proc_misc.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/proc/proc_misc.c 2003-01-29 17:23:35.000000000 +0000 @@ -106,11 +106,11 @@ a = avenrun[0] + (FIXED_1/200); b = avenrun[1] + (FIXED_1/200); c = avenrun[2] + (FIXED_1/200); - len = sprintf(page,"%d.%02d %d.%02d %d.%02d %d/%d %d\n", + len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", LOAD_INT(a), LOAD_FRAC(a), LOAD_INT(b), LOAD_FRAC(b), LOAD_INT(c), LOAD_FRAC(c), - nr_running, nr_threads, last_pid); + nr_running(), nr_threads, last_pid); return proc_calc_metrics(page, start, off, count, eof, len); } @@ -122,7 +122,7 @@ int len; uptime = jiffies; - idle = init_tasks[0]->times.tms_utime + init_tasks[0]->times.tms_stime; + idle = init_task.times.tms_utime + init_task.times.tms_stime; /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but that would overflow about every five days at HZ == 100. @@ -155,7 +155,11 @@ struct sysinfo i; int len; int pg_size ; + int committed; + /* FIXME: needs to be in headers */ + extern atomic_t vm_committed_space; + /* * display in kilobytes. 
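With the fs/proc/array.c hunk above, /proc/<pid>/stat gains two trailing fields, rt_priority and policy. An illustrative userspace sketch (not from the patch) that simply takes the last two whitespace-separated tokens, which avoids counting columns past comm, and assumes a kernel carrying this change:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096], *p, *last = NULL, *prev = NULL;
	FILE *f = fopen("/proc/self/stat", "r");

	if (!f || !fgets(line, sizeof(line), f))
		return 1;
	fclose(f);
	for (p = strtok(line, " \n"); p; p = strtok(NULL, " \n")) {
		prev = last;
		last = p;
	}
	if (prev && last)
		printf("rt_priority=%s policy=%s\n", prev, last);
	return 0;
}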
*/ @@ -164,6 +168,7 @@ si_meminfo(&i); si_swapinfo(&i); pg_size = atomic_read(&page_cache_size) - i.bufferram ; + committed = atomic_read(&vm_committed_space); len = sprintf(page, " total: used: free: shared: buffers: cached:\n" "Mem: %8Lu %8Lu %8Lu %8Lu %8Lu %8Lu\n" @@ -370,11 +375,11 @@ } } - proc_sprintf(page, &off, &len, - "\nctxt %u\n" + len += sprintf(page + len, + "\nctxt %lu\n" "btime %lu\n" "processes %lu\n", - kstat.context_swtch, + nr_context_switches(), xtime.tv_sec - jif / HZ, total_forks); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/quota.c linux.21pre4-ac2/fs/quota.c --- linux.21pre4/fs/quota.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/fs/quota.c 2003-01-06 23:23:02.000000000 +0000 @@ -0,0 +1,656 @@ +/* + * Quota code necessary even when VFS quota support is not compiled + * into the kernel. The interesting stuff is over in dquot.c, here + * we have symbols for initial quotactl(2) handling, the sysctl(2) + * variables, etc - things needed even when quota support disabled. + */ + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_QIFACE_COMPAT +#include +#endif + + +int nr_dquots, nr_free_dquots; + +/* Check validity of quotactl */ +static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= MAXQUOTAS) + return -EINVAL; + /* Is operation supported? */ + if (!sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_GETFMT: + break; + case Q_QUOTAON: + if (!sb->s_qcop->quota_on) + return -ENOSYS; + break; + case Q_QUOTAOFF: + if (!sb->s_qcop->quota_off) + return -ENOSYS; + break; + case Q_SETINFO: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_GETINFO: + if (!sb->s_qcop->get_info) + return -ENOSYS; + break; + case Q_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_SYNC: + if (!sb->s_qcop->quota_sync) + return -ENOSYS; + break; + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: + if (!sb->s_qcop->set_xstate) + return -ENOSYS; + break; + case Q_XGETQSTAT: + if (!sb->s_qcop->get_xstate) + return -ENOSYS; + break; + case Q_XSETQLIM: + if (!sb->s_qcop->set_xquota) + return -ENOSYS; + break; + case Q_XGETQUOTA: + if (!sb->s_qcop->get_xquota) + return -ENOSYS; + break; + default: + return -EINVAL; + } + + /* Is quota turned on for commands which need it? 
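From userspace, the interface that check_quotactl_valid() and do_quotactl() implement is reached through quotactl(2); QCMD() packs the subcommand and quota type exactly as sys_quotactl() later unpacks them with SUBCMDSHIFT and SUBCMDMASK. A sketch, assuming a <sys/quota.h> that exposes the new-style Q_GETQUOTA and struct dqblk, with a made-up device path:

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <unistd.h>

int main(void)
{
	struct dqblk q;

	/* QCMD() packs subcommand and type, mirroring the
	 * cmd >> SUBCMDSHIFT / cmd & SUBCMDMASK split in sys_quotactl(). */
	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/hda1", getuid(),
		     (caddr_t)&q) != 0) {
		perror("quotactl");
		return 1;
	}
	printf("inodes in use: %llu\n", (unsigned long long)q.dqb_curinodes);
	return 0;
}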
*/ + switch (cmd) { + case Q_GETFMT: + case Q_GETINFO: + case Q_QUOTAOFF: + case Q_SETINFO: + case Q_SETQUOTA: + case Q_GETQUOTA: + if (!sb_has_quota_enabled(sb, type)) + return -ESRCH; + } + /* Check privileges */ + if (cmd == Q_GETQUOTA || cmd == Q_XGETQUOTA) { + if (((type == USRQUOTA && current->euid != id) || + (type == GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO && cmd != Q_XGETQSTAT) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 0; +} + +/* Resolve device pathname to superblock */ +static struct super_block *resolve_dev(const char *path) +{ + int ret; + mode_t mode; + struct nameidata nd; + kdev_t dev; + struct super_block *sb; + + ret = user_path_walk(path, &nd); + if (ret) + goto out; + + dev = nd.dentry->d_inode->i_rdev; + mode = nd.dentry->d_inode->i_mode; + path_release(&nd); + + ret = -ENOTBLK; + if (!S_ISBLK(mode)) + goto out; + ret = -ENODEV; + sb = get_super(dev); + if (!sb) + goto out; + return sb; +out: + return ERR_PTR(ret); +} + +/* Copy parameters and call proper function */ +static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, caddr_t addr) +{ + int ret; + + switch (cmd) { + case Q_QUOTAON: { + char *pathname; + + if (IS_ERR(pathname = getname(addr))) + return PTR_ERR(pathname); + ret = sb->s_qcop->quota_on(sb, type, id, pathname); + putname(pathname); + return ret; + } + case Q_QUOTAOFF: + return sb->s_qcop->quota_off(sb, type); + + case Q_GETFMT: { + __u32 fmt; + + fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; + if (copy_to_user(addr, &fmt, sizeof(fmt))) + return -EFAULT; + return 0; + } + case Q_GETINFO: { + struct if_dqinfo info; + + if ((ret = sb->s_qcop->get_info(sb, type, &info))) + return ret; + if (copy_to_user(addr, &info, sizeof(info))) + return -EFAULT; + return 0; + } + case Q_SETINFO: { + struct if_dqinfo info; + + if (copy_from_user(&info, addr, sizeof(info))) + return -EFAULT; + return sb->s_qcop->set_info(sb, type, &info); + } + case Q_GETQUOTA: { + struct if_dqblk idq; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) + return ret; + if (copy_to_user(addr, &idq, sizeof(idq))) + return -EFAULT; + return 0; + } + case Q_SETQUOTA: { + struct if_dqblk idq; + + if (copy_from_user(&idq, addr, sizeof(idq))) + return -EFAULT; + return sb->s_qcop->set_dqblk(sb, type, id, &idq); + } + case Q_SYNC: + return sb->s_qcop->quota_sync(sb, type); + + case Q_XQUOTAON: + case Q_XQUOTAOFF: + case Q_XQUOTARM: { + __u32 flags; + + if (copy_from_user(&flags, addr, sizeof(flags))) + return -EFAULT; + return sb->s_qcop->set_xstate(sb, flags, cmd); + } + case Q_XGETQSTAT: { + struct fs_quota_stat fqs; + + if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) + return ret; + if (copy_to_user(addr, &fqs, sizeof(fqs))) + return -EFAULT; + return 0; + } + case Q_XSETQLIM: { + struct fs_disk_quota fdq; + + if (copy_from_user(&fdq, addr, sizeof(fdq))) + return -EFAULT; + return sb->s_qcop->set_xquota(sb, type, id, &fdq); + } + case Q_XGETQUOTA: { + struct fs_disk_quota fdq; + + if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) + return ret; + if (copy_to_user(addr, &fdq, sizeof(fdq))) + return -EFAULT; + return 0; + } + /* We never reach here unless validity check is broken */ + default: + BUG(); + } + return 0; +} + +#ifdef CONFIG_QIFACE_COMPAT +static int check_compat_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) +{ + if (type >= MAXQUOTAS) + return -EINVAL; + /* Is operation supported? 
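The privilege rule applied a few lines above (a user may query their own user quota, or a group quota for a group they belong to; anything else requires CAP_SYS_ADMIN) restated as a standalone predicate, purely for illustration:

#include <stdio.h>

enum { USR = 0, GRP = 1 };

static int may_get_quota(int type, unsigned int id, unsigned int euid,
			 int in_egroup, int has_cap_sys_admin)
{
	if (type == USR && euid == id)
		return 1;		/* own user quota */
	if (type == GRP && in_egroup)
		return 1;		/* group quota of a group we belong to */
	return has_cap_sys_admin;	/* everything else needs CAP_SYS_ADMIN */
}

int main(void)
{
	printf("uid 1000 asking about uid 1000: %d\n",
	       may_get_quota(USR, 1000, 1000, 0, 0));
	printf("uid 1000 asking about uid 0:    %d\n",
	       may_get_quota(USR, 0, 1000, 0, 0));
	return 0;
}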
*/ + /* sb==NULL for GETSTATS calls */ + if (sb && !sb->s_qcop) + return -ENOSYS; + + switch (cmd) { + case Q_COMP_QUOTAON: + if (!sb->s_qcop->quota_on) + return -ENOSYS; + break; + case Q_COMP_QUOTAOFF: + if (!sb->s_qcop->quota_off) + return -ENOSYS; + break; + case Q_COMP_SYNC: + if (!sb->s_qcop->quota_sync) + return -ENOSYS; + break; +#ifdef CONFIG_QIFACE_V2 + case Q_V2_SETFLAGS: + case Q_V2_SETGRACE: + case Q_V2_SETINFO: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_V2_GETINFO: + if (!sb->s_qcop->get_info) + return -ENOSYS; + break; + case Q_V2_SETQLIM: + case Q_V2_SETUSE: + case Q_V2_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_V2_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_V2_GETSTATS: + return 0; /* GETSTATS need no other checks */ +#endif +#ifdef CONFIG_QIFACE_V1 + case Q_V1_SETQLIM: + case Q_V1_SETUSE: + case Q_V1_SETQUOTA: + if (!sb->s_qcop->set_dqblk) + return -ENOSYS; + break; + case Q_V1_GETQUOTA: + if (!sb->s_qcop->get_dqblk) + return -ENOSYS; + break; + case Q_V1_RSQUASH: + if (!sb->s_qcop->set_info) + return -ENOSYS; + break; + case Q_V1_GETSTATS: + return 0; /* GETSTATS need no other checks */ +#endif + default: + return -EINVAL; + } + + /* Is quota turned on for commands which need it? */ + switch (cmd) { + case Q_V2_SETFLAGS: + case Q_V2_SETGRACE: + case Q_V2_SETINFO: + case Q_V2_GETINFO: + case Q_COMP_QUOTAOFF: + case Q_V1_RSQUASH: + case Q_V1_SETQUOTA: + case Q_V1_SETQLIM: + case Q_V1_SETUSE: + case Q_V2_SETQUOTA: + /* Q_V2_SETQLIM: collision with Q_V1_SETQLIM */ + case Q_V2_SETUSE: + case Q_V1_GETQUOTA: + case Q_V2_GETQUOTA: + if (!sb_has_quota_enabled(sb, type)) + return -ESRCH; + } +#ifdef CONFIG_QIFACE_V1 + if (cmd != Q_COMP_QUOTAON && cmd != Q_COMP_QUOTAOFF && cmd != Q_COMP_SYNC && sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id != QFMT_VFS_OLD) +#else + if (cmd != Q_COMP_QUOTAON && cmd != Q_COMP_QUOTAOFF && cmd != Q_COMP_SYNC && sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id != QFMT_VFS_V0) +#endif + return -ESRCH; + + /* Check privileges */ + if (cmd == Q_V1_GETQUOTA || cmd == Q_V2_GETQUOTA) { + if (((type == USRQUOTA && current->euid != id) || + (type == GRPQUOTA && !in_egroup_p(id))) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + } + else if (cmd != Q_V1_GETSTATS && cmd != Q_V2_GETSTATS && cmd != Q_V2_GETINFO && cmd != Q_COMP_SYNC) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 0; +} + +#ifdef CONFIG_QIFACE_V1 +static int v1_set_rsquash(struct super_block *sb, int type, int flag) +{ + struct if_dqinfo info; + + info.dqi_valid = IIF_FLAGS; + info.dqi_flags = flag ? 
V1_DQF_RSQUASH : 0; + return sb->s_qcop->set_info(sb, type, &info); +} + +static int v1_get_dqblk(struct super_block *sb, int type, qid_t id, struct v1c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + int ret; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)) < 0) + return ret; + mdq->dqb_ihardlimit = idq.dqb_ihardlimit; + mdq->dqb_isoftlimit = idq.dqb_isoftlimit; + mdq->dqb_curinodes = idq.dqb_curinodes; + mdq->dqb_bhardlimit = idq.dqb_bhardlimit; + mdq->dqb_bsoftlimit = idq.dqb_bsoftlimit; + mdq->dqb_curblocks = toqb(idq.dqb_curspace); + mdq->dqb_itime = idq.dqb_itime; + mdq->dqb_btime = idq.dqb_btime; + if (id == 0) { /* Times for id 0 are in fact grace times */ + struct if_dqinfo info; + + if ((ret = sb->s_qcop->get_info(sb, type, &info)) < 0) + return ret; + mdq->dqb_btime = info.dqi_bgrace; + mdq->dqb_itime = info.dqi_igrace; + } + return 0; +} + +static int v1_set_dqblk(struct super_block *sb, int type, int cmd, qid_t id, struct v1c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + int ret; + + idq.dqb_valid = 0; + if (cmd == Q_V1_SETQUOTA || cmd == Q_V1_SETQLIM) { + idq.dqb_ihardlimit = mdq->dqb_ihardlimit; + idq.dqb_isoftlimit = mdq->dqb_isoftlimit; + idq.dqb_bhardlimit = mdq->dqb_bhardlimit; + idq.dqb_bsoftlimit = mdq->dqb_bsoftlimit; + idq.dqb_valid |= QIF_LIMITS; + } + if (cmd == Q_V1_SETQUOTA || cmd == Q_V1_SETUSE) { + idq.dqb_curinodes = mdq->dqb_curinodes; + idq.dqb_curspace = ((qsize_t)mdq->dqb_curblocks) << QUOTABLOCK_BITS; + idq.dqb_valid |= QIF_USAGE; + } + ret = sb->s_qcop->set_dqblk(sb, type, id, &idq); + if (!ret && id == 0 && cmd == Q_V1_SETQUOTA) { /* Times for id 0 are in fact grace times */ + struct if_dqinfo info; + + info.dqi_bgrace = mdq->dqb_btime; + info.dqi_igrace = mdq->dqb_itime; + info.dqi_valid = IIF_BGRACE | IIF_IGRACE; + ret = sb->s_qcop->set_info(sb, type, &info); + } + return ret; +} + +static void v1_get_stats(struct v1c_dqstats *dst) +{ + memcpy(dst, &dqstats, sizeof(dqstats)); +} +#endif + +#ifdef CONFIG_QIFACE_V2 +static int v2_get_info(struct super_block *sb, int type, struct v2c_mem_dqinfo *oinfo) +{ + struct if_dqinfo info; + int ret; + + if ((ret = sb->s_qcop->get_info(sb, type, &info)) < 0) + return ret; + oinfo->dqi_bgrace = info.dqi_bgrace; + oinfo->dqi_igrace = info.dqi_igrace; + oinfo->dqi_flags = info.dqi_flags; + oinfo->dqi_blocks = sb_dqopt(sb)->info[type].u.v2_i.dqi_blocks; + oinfo->dqi_free_blk = sb_dqopt(sb)->info[type].u.v2_i.dqi_free_blk; + oinfo->dqi_free_entry = sb_dqopt(sb)->info[type].u.v2_i.dqi_free_entry; + return 0; +} + +static int v2_set_info(struct super_block *sb, int type, int cmd, struct v2c_mem_dqinfo *oinfo) +{ + struct if_dqinfo info; + + info.dqi_valid = 0; + if (cmd == Q_V2_SETGRACE || cmd == Q_V2_SETINFO) { + info.dqi_bgrace = oinfo->dqi_bgrace; + info.dqi_igrace = oinfo->dqi_igrace; + info.dqi_valid |= IIF_BGRACE | IIF_IGRACE; + } + if (cmd == Q_V2_SETFLAGS || cmd == Q_V2_SETINFO) { + info.dqi_flags = oinfo->dqi_flags; + info.dqi_valid |= IIF_FLAGS; + } + /* We don't simulate deadly effects of setting other parameters ;-) */ + return sb->s_qcop->set_info(sb, type, &info); +} + +static int v2_get_dqblk(struct super_block *sb, int type, qid_t id, struct v2c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + int ret; + + if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)) < 0) + return ret; + mdq->dqb_ihardlimit = idq.dqb_ihardlimit; + mdq->dqb_isoftlimit = idq.dqb_isoftlimit; + mdq->dqb_curinodes = idq.dqb_curinodes; + mdq->dqb_bhardlimit = idq.dqb_bhardlimit; + mdq->dqb_bsoftlimit = idq.dqb_bsoftlimit; + 
mdq->dqb_curspace = idq.dqb_curspace; + mdq->dqb_itime = idq.dqb_itime; + mdq->dqb_btime = idq.dqb_btime; + return 0; +} + +static int v2_set_dqblk(struct super_block *sb, int type, int cmd, qid_t id, struct v2c_mem_dqblk *mdq) +{ + struct if_dqblk idq; + + idq.dqb_valid = 0; + if (cmd == Q_V2_SETQUOTA || cmd == Q_V2_SETQLIM) { + idq.dqb_ihardlimit = mdq->dqb_ihardlimit; + idq.dqb_isoftlimit = mdq->dqb_isoftlimit; + idq.dqb_bhardlimit = mdq->dqb_bhardlimit; + idq.dqb_bsoftlimit = mdq->dqb_bsoftlimit; + idq.dqb_valid |= QIF_LIMITS; + } + if (cmd == Q_V2_SETQUOTA || cmd == Q_V2_SETUSE) { + idq.dqb_curinodes = mdq->dqb_curinodes; + idq.dqb_curspace = mdq->dqb_curspace; + idq.dqb_valid |= QIF_USAGE; + } + return sb->s_qcop->set_dqblk(sb, type, id, &idq); +} + +static void v2_get_stats(struct v2c_dqstats *dst) +{ + memcpy(dst, &dqstats, sizeof(dqstats)); + dst->version = __DQUOT_NUM_VERSION__; +} +#endif + +/* Handle requests to old interface */ +static int do_compat_quotactl(struct super_block *sb, int type, int cmd, qid_t id, caddr_t addr) +{ + int ret; + + switch (cmd) { + case Q_COMP_QUOTAON: { + char *pathname; + + if (IS_ERR(pathname = getname(addr))) + return PTR_ERR(pathname); +#ifdef CONFIG_QIFACE_V1 + ret = sb->s_qcop->quota_on(sb, type, QFMT_VFS_OLD, pathname); +#else + ret = sb->s_qcop->quota_on(sb, type, QFMT_VFS_V0, pathname); +#endif + putname(pathname); + return ret; + } + case Q_COMP_QUOTAOFF: + return sb->s_qcop->quota_off(sb, type); + case Q_COMP_SYNC: + return sb->s_qcop->quota_sync(sb, type); +#ifdef CONFIG_QIFACE_V1 + case Q_V1_RSQUASH: { + int flag; + + if (copy_from_user(&flag, addr, sizeof(flag))) + return -EFAULT; + return v1_set_rsquash(sb, type, flag); + } + case Q_V1_GETQUOTA: { + struct v1c_mem_dqblk mdq; + + if ((ret = v1_get_dqblk(sb, type, id, &mdq))) + return ret; + if (copy_to_user(addr, &mdq, sizeof(mdq))) + return -EFAULT; + return 0; + } + case Q_V1_SETQLIM: + case Q_V1_SETUSE: + case Q_V1_SETQUOTA: { + struct v1c_mem_dqblk mdq; + + if (copy_from_user(&mdq, addr, sizeof(mdq))) + return -EFAULT; + return v1_set_dqblk(sb, type, cmd, id, &mdq); + } + case Q_V1_GETSTATS: { + struct v1c_dqstats dst; + + v1_get_stats(&dst); + if (copy_to_user(addr, &dst, sizeof(dst))) + return -EFAULT; + return 0; + } +#endif +#ifdef CONFIG_QIFACE_V2 + case Q_V2_GETINFO: { + struct v2c_mem_dqinfo info; + + if ((ret = v2_get_info(sb, type, &info))) + return ret; + if (copy_to_user(addr, &info, sizeof(info))) + return -EFAULT; + return 0; + } + case Q_V2_SETFLAGS: + case Q_V2_SETGRACE: + case Q_V2_SETINFO: { + struct v2c_mem_dqinfo info; + + if (copy_from_user(&info, addr, sizeof(info))) + return -EFAULT; + + return v2_set_info(sb, type, cmd, &info); + } + case Q_V2_GETQUOTA: { + struct v2c_mem_dqblk mdq; + + if ((ret = v2_get_dqblk(sb, type, id, &mdq))) + return ret; + if (copy_to_user(addr, &mdq, sizeof(mdq))) + return -EFAULT; + return 0; + } + case Q_V2_SETUSE: + case Q_V2_SETQLIM: + case Q_V2_SETQUOTA: { + struct v2c_mem_dqblk mdq; + + if (copy_from_user(&mdq, addr, sizeof(mdq))) + return -EFAULT; + return v2_set_dqblk(sb, type, cmd, id, &mdq); + } + case Q_V2_GETSTATS: { + struct v2c_dqstats dst; + + v2_get_stats(&dst); + if (copy_to_user(addr, &dst, sizeof(dst))) + return -EFAULT; + return 0; + } +#endif + } + BUG(); + return 0; +} +#endif + +/* Macros for short-circuiting the compatibility tests */ +#define NEW_COMMAND(c) ((c) & (0x80 << 16)) +#define XQM_COMMAND(c) (((c) & ('X' << 8)) == ('X' << 8)) + +/* + * This is the system call interface. 
This communicates with + * the user-level programs. Currently this only supports diskquota + * calls. Maybe we need to add the process quotas etc. in the future, + * but we probably should use rlimits for that. + */ +asmlinkage long sys_quotactl(unsigned int cmd, const char *special, qid_t id, caddr_t addr) +{ + uint cmds, type; + struct super_block *sb = NULL; + int ret = -EINVAL; + + lock_kernel(); + cmds = cmd >> SUBCMDSHIFT; + type = cmd & SUBCMDMASK; + +#ifdef CONFIG_QIFACE_COMPAT + if (cmds != Q_V1_GETSTATS && cmds != Q_V2_GETSTATS && IS_ERR(sb = resolve_dev(special))) { + ret = PTR_ERR(sb); + sb = NULL; + goto out; + } + if (!NEW_COMMAND(cmds) && !XQM_COMMAND(cmds)) { + if ((ret = check_compat_quotactl_valid(sb, type, cmds, id)) < 0) + goto out; + ret = do_compat_quotactl(sb, type, cmds, id, addr); + goto out; + } +#else + if (IS_ERR(sb = resolve_dev(special))) { + ret = PTR_ERR(sb); + sb = NULL; + goto out; + } +#endif + if ((ret = check_quotactl_valid(sb, type, cmds, id)) < 0) + goto out; + ret = do_quotactl(sb, type, cmds, id, addr); +out: + if (sb) + drop_super(sb); + unlock_kernel(); + return ret; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/quota_v1.c linux.21pre4-ac2/fs/quota_v1.c --- linux.21pre4/fs/quota_v1.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/fs/quota_v1.c 2003-01-06 23:23:02.000000000 +0000 @@ -0,0 +1,239 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) +{ + m->dqb_ihardlimit = d->dqb_ihardlimit; + m->dqb_isoftlimit = d->dqb_isoftlimit; + m->dqb_curinodes = d->dqb_curinodes; + m->dqb_bhardlimit = d->dqb_bhardlimit; + m->dqb_bsoftlimit = d->dqb_bsoftlimit; + m->dqb_curspace = d->dqb_curblocks << QUOTABLOCK_BITS; + m->dqb_itime = d->dqb_itime; + m->dqb_btime = d->dqb_btime; +} + +static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) +{ + d->dqb_ihardlimit = m->dqb_ihardlimit; + d->dqb_isoftlimit = m->dqb_isoftlimit; + d->dqb_curinodes = m->dqb_curinodes; + d->dqb_bhardlimit = m->dqb_bhardlimit; + d->dqb_bsoftlimit = m->dqb_bsoftlimit; + d->dqb_curblocks = toqb(m->dqb_curspace); + d->dqb_itime = m->dqb_itime; + d->dqb_btime = m->dqb_btime; +} + +static int v1_read_dqblk(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + struct v1_disk_dqblk dqblk; + + filp = sb_dqopt(dquot->dq_sb)->files[type]; + if (filp == (struct file *)NULL) + return -EINVAL; + + /* Now we are sure filp is valid */ + offset = v1_dqoff(dquot->dq_id); + fs = get_fs(); + set_fs(KERNEL_DS); + filp->f_op->read(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset); + set_fs(fs); + + v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); + if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && + dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) + dquot->dq_flags |= DQ_FAKE; + dqstats.reads++; + return 0; +} + +static int v1_commit_dqblk(struct dquot *dquot) +{ + short type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + ssize_t ret; + struct v1_disk_dqblk dqblk; + + filp = sb_dqopt(dquot->dq_sb)->files[type]; + offset = v1_dqoff(dquot->dq_id); + fs = get_fs(); + set_fs(KERNEL_DS); + + /* + * Note: clear the DQ_MOD flag unconditionally, + * so we don't loop forever on failure. 
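v1_read_dqblk() above can seek straight to v1_dqoff(dquot->dq_id) because the old quota file format is a flat array of struct v1_disk_dqblk indexed by uid or gid. A toy sketch of that offset calculation, using a placeholder structure rather than the real on-disk layout (not code from the patch):

#include <stdio.h>

struct v1_disk_dqblk_demo { unsigned int fields[8]; };	/* placeholder only */

static long v1_offset(unsigned int id)
{
	return (long)id * sizeof(struct v1_disk_dqblk_demo);
}

int main(void)
{
	printf("quota record for uid 1000 starts at byte %ld\n",
	       v1_offset(1000));
	return 0;
}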
+ */ + v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); + dquot->dq_flags &= ~DQ_MOD; + if (dquot->dq_id == 0) { + dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; + dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; + } + ret = 0; + if (filp) + ret = filp->f_op->write(filp, (char *)&dqblk, + sizeof(struct v1_disk_dqblk), &offset); + if (ret != sizeof(struct v1_disk_dqblk)) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", + kdevname(dquot->dq_dev)); + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + +out: + set_fs(fs); + dqstats.writes++; + return ret; +} + +/* Magics of new quota format */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +/* Header of new quota format */ +struct v2_disk_dqheader { + __u32 dqh_magic; /* Magic number identifying file */ + __u32 dqh_version; /* File version */ +}; + +static int v1_check_quota_file(struct super_block *sb, int type) +{ + struct file *f = sb_dqopt(sb)->files[type]; + struct inode *inode = f->f_dentry->d_inode; + ulong blocks; + size_t off; + struct v2_disk_dqheader dqhead; + mm_segment_t fs; + ssize_t size; + loff_t offset = 0; + static const uint quota_magics[] = V2_INITQMAGICS; + + if (!inode->i_size) + return 0; + blocks = inode->i_size >> BLOCK_SIZE_BITS; + off = inode->i_size & (BLOCK_SIZE - 1); + if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) + return 0; + /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->read(f, (char *)&dqhead, sizeof(struct v2_disk_dqheader), &offset); + set_fs(fs); + if (size != sizeof(struct v2_disk_dqheader)) + return 1; /* Probably not new format */ + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) + return 1; /* Definitely not new format */ + printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", kdevname(sb->s_dev)); + return 0; /* Seems like a new format file -> refuse it */ +} + +static int v1_read_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + mm_segment_t fs; + loff_t offset; + struct file *filp = dqopt->files[type]; + struct v1_disk_dqblk dqblk; + int ret; + + down(&dqopt->dqio_sem); + offset = v1_dqoff(0); + fs = get_fs(); + set_fs(KERNEL_DS); + if ((ret = filp->f_op->read(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset)) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + ret = 0; + dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; + dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? 
dqblk.dqb_btime : MAX_DQ_TIME; +out: + up(&dqopt->dqio_sem); + set_fs(fs); + return ret; +} + +static int v1_write_file_info(struct super_block *sb, int type) +{ + struct quota_info *dqopt = sb_dqopt(sb); + mm_segment_t fs; + struct file *filp = dqopt->files[type]; + struct v1_disk_dqblk dqblk; + loff_t offset; + int ret; + + down(&dqopt->dqio_sem); + dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; + offset = v1_dqoff(0); + fs = get_fs(); + set_fs(KERNEL_DS); + if ((ret = filp->f_op->read(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset)) != sizeof(struct v1_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + goto out; + } + dqblk.dqb_itime = dqopt->info[type].dqi_igrace; + dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; + offset = v1_dqoff(0); + ret = filp->f_op->write(filp, (char *)&dqblk, sizeof(struct v1_disk_dqblk), &offset); + if (ret == sizeof(struct v1_disk_dqblk)) + ret = 0; + else if (ret > 0) + ret = -EIO; +out: + up(&dqopt->dqio_sem); + set_fs(fs); + return ret; +} + +static struct quota_format_ops v1_format_ops = { + check_quota_file: v1_check_quota_file, + read_file_info: v1_read_file_info, + write_file_info: v1_write_file_info, + free_file_info: NULL, + read_dqblk: v1_read_dqblk, + commit_dqblk: v1_commit_dqblk, +}; + +static struct quota_format_type v1_quota_format = { + qf_fmt_id: QFMT_VFS_OLD, + qf_ops: &v1_format_ops, + qf_owner: THIS_MODULE +}; + +static int __init init_v1_quota_format(void) +{ + return register_quota_format(&v1_quota_format); +} + +static void __exit exit_v1_quota_format(void) +{ + unregister_quota_format(&v1_quota_format); +} + +EXPORT_NO_SYMBOLS; + +module_init(init_v1_quota_format); +module_exit(exit_v1_quota_format); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/quota_v2.c linux.21pre4-ac2/fs/quota_v2.c --- linux.21pre4/fs/quota_v2.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/fs/quota_v2.c 2003-01-06 23:23:02.000000000 +0000 @@ -0,0 +1,690 @@ +/* + * vfsv0 quota IO operations on file + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define __QUOTA_V2_PARANOIA + +typedef char *dqbuf_t; + +#define GETIDINDEX(id, depth) (((id) >> ((V2_DQTREEDEPTH-(depth)-1)*8)) & 0xff) +#define GETENTRIES(buf) ((struct v2_disk_dqblk *)(((char *)buf)+sizeof(struct v2_disk_dqdbheader))) + +/* Check whether given file is really vfsv0 quotafile */ +static int v2_check_quota_file(struct super_block *sb, int type) +{ + struct v2_disk_dqheader dqhead; + struct file *f = sb_dqopt(sb)->files[type]; + mm_segment_t fs; + ssize_t size; + loff_t offset = 0; + static const uint quota_magics[] = V2_INITQMAGICS; + static const uint quota_versions[] = V2_INITQVERSIONS; + + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->read(f, (char *)&dqhead, sizeof(struct v2_disk_dqheader), &offset); + set_fs(fs); + if (size != sizeof(struct v2_disk_dqheader)) + return 0; + if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || + le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) + return 0; + return 1; +} + +/* Read information header from quota file */ +static int v2_read_file_info(struct super_block *sb, int type) +{ + mm_segment_t fs; + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqopt(sb)->info+type; + struct file *f = sb_dqopt(sb)->files[type]; + ssize_t size; + loff_t offset = V2_DQINFOOFF; + + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->read(f, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), &offset); + set_fs(fs); + if (size != 
sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't read info structure on device %s.\n", + kdevname(f->f_dentry->d_sb->s_dev)); + return -1; + } + info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); + info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); + info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); + info->u.v2_i.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); + info->u.v2_i.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); + info->u.v2_i.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); + return 0; +} + +/* Write information header to quota file */ +static int v2_write_file_info(struct super_block *sb, int type) +{ + mm_segment_t fs; + struct v2_disk_dqinfo dinfo; + struct mem_dqinfo *info = sb_dqopt(sb)->info+type; + struct file *f = sb_dqopt(sb)->files[type]; + ssize_t size; + loff_t offset = V2_DQINFOOFF; + + info->dqi_flags &= ~DQF_INFO_DIRTY; + dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); + dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); + dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); + dinfo.dqi_blocks = cpu_to_le32(info->u.v2_i.dqi_blocks); + dinfo.dqi_free_blk = cpu_to_le32(info->u.v2_i.dqi_free_blk); + dinfo.dqi_free_entry = cpu_to_le32(info->u.v2_i.dqi_free_entry); + fs = get_fs(); + set_fs(KERNEL_DS); + size = f->f_op->write(f, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), &offset); + set_fs(fs); + if (size != sizeof(struct v2_disk_dqinfo)) { + printk(KERN_WARNING "Can't write info structure on device %s.\n", + kdevname(f->f_dentry->d_sb->s_dev)); + return -1; + } + return 0; +} + +static void disk2memdqb(struct mem_dqblk *m, struct v2_disk_dqblk *d) +{ + m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); + m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); + m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); + m->dqb_itime = le64_to_cpu(d->dqb_itime); + m->dqb_bhardlimit = le32_to_cpu(d->dqb_bhardlimit); + m->dqb_bsoftlimit = le32_to_cpu(d->dqb_bsoftlimit); + m->dqb_curspace = le64_to_cpu(d->dqb_curspace); + m->dqb_btime = le64_to_cpu(d->dqb_btime); +} + +static void mem2diskdqb(struct v2_disk_dqblk *d, struct mem_dqblk *m, qid_t id) +{ + d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); + d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); + d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); + d->dqb_itime = cpu_to_le64(m->dqb_itime); + d->dqb_bhardlimit = cpu_to_le32(m->dqb_bhardlimit); + d->dqb_bsoftlimit = cpu_to_le32(m->dqb_bsoftlimit); + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(id); +} + +static dqbuf_t getdqbuf(void) +{ + dqbuf_t buf = kmalloc(V2_DQBLKSIZE, GFP_KERNEL); + if (!buf) + printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); + return buf; +} + +static inline void freedqbuf(dqbuf_t buf) +{ + kfree(buf); +} + +static ssize_t read_blk(struct file *filp, uint blk, dqbuf_t buf) +{ + mm_segment_t fs; + ssize_t ret; + loff_t offset = blk << V2_DQBLKSIZE_BITS; + + fs = get_fs(); + set_fs(KERNEL_DS); + ret = filp->f_op->read(filp, (char *)buf, V2_DQBLKSIZE, &offset); + set_fs(fs); + return ret; +} + +static ssize_t write_blk(struct file *filp, uint blk, dqbuf_t buf) +{ + mm_segment_t fs; + ssize_t ret; + loff_t offset = blk << V2_DQBLKSIZE_BITS; + + fs = get_fs(); + set_fs(KERNEL_DS); + ret = filp->f_op->write(filp, (char *)buf, V2_DQBLKSIZE, &offset); + set_fs(fs); + return ret; + +} + +/* Remove empty block from list and return it */ +static int get_free_dqblk(struct file *filp, struct mem_dqinfo *info) +{ + dqbuf_t buf = getdqbuf(); + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + int ret, blk; + + if (!buf) + return -ENOMEM; + if (info->u.v2_i.dqi_free_blk) { + blk = 
info->u.v2_i.dqi_free_blk; + if ((ret = read_blk(filp, blk, buf)) < 0) + goto out_buf; + info->u.v2_i.dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); + } + else { + memset(buf, 0, V2_DQBLKSIZE); + if ((ret = write_blk(filp, info->u.v2_i.dqi_blocks, buf)) < 0) /* Assure block allocation... */ + goto out_buf; + blk = info->u.v2_i.dqi_blocks++; + } + mark_info_dirty(info); + ret = blk; +out_buf: + freedqbuf(buf); + return ret; +} + +/* Insert empty block to the list */ +static int put_free_dqblk(struct file *filp, struct mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + int err; + + dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_blk); + dh->dqdh_prev_free = cpu_to_le32(0); + dh->dqdh_entries = cpu_to_le16(0); + info->u.v2_i.dqi_free_blk = blk; + mark_info_dirty(info); + if ((err = write_blk(filp, blk, buf)) < 0) /* Some strange block. We had better leave it... */ + return err; + return 0; +} + +/* Remove given block from the list of blocks with free entries */ +static int remove_free_dqentry(struct file *filp, struct mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(); + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + uint nextblk = le32_to_cpu(dh->dqdh_next_free), prevblk = le32_to_cpu(dh->dqdh_prev_free); + int err; + + if (!tmpbuf) + return -ENOMEM; + if (nextblk) { + if ((err = read_blk(filp, nextblk, tmpbuf)) < 0) + goto out_buf; + ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = dh->dqdh_prev_free; + if ((err = write_blk(filp, nextblk, tmpbuf)) < 0) + goto out_buf; + } + if (prevblk) { + if ((err = read_blk(filp, prevblk, tmpbuf)) < 0) + goto out_buf; + ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_next_free = dh->dqdh_next_free; + if ((err = write_blk(filp, prevblk, tmpbuf)) < 0) + goto out_buf; + } + else { + info->u.v2_i.dqi_free_entry = nextblk; + mark_info_dirty(info); + } + freedqbuf(tmpbuf); + dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); + if (write_blk(filp, blk, buf) < 0) /* No matter whether write succeeds block is out of list */ + printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Insert given block to the beginning of list with free entries */ +static int insert_free_dqentry(struct file *filp, struct mem_dqinfo *info, dqbuf_t buf, uint blk) +{ + dqbuf_t tmpbuf = getdqbuf(); + struct v2_disk_dqdbheader *dh = (struct v2_disk_dqdbheader *)buf; + int err; + + if (!tmpbuf) + return -ENOMEM; + dh->dqdh_next_free = cpu_to_le32(info->u.v2_i.dqi_free_entry); + dh->dqdh_prev_free = cpu_to_le32(0); + if ((err = write_blk(filp, blk, buf)) < 0) + goto out_buf; + if (info->u.v2_i.dqi_free_entry) { + if ((err = read_blk(filp, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0) + goto out_buf; + ((struct v2_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = cpu_to_le32(blk); + if ((err = write_blk(filp, info->u.v2_i.dqi_free_entry, tmpbuf)) < 0) + goto out_buf; + } + freedqbuf(tmpbuf); + info->u.v2_i.dqi_free_entry = blk; + mark_info_dirty(info); + return 0; +out_buf: + freedqbuf(tmpbuf); + return err; +} + +/* Find space for dquot */ +static uint find_free_dqentry(struct dquot *dquot, int *err) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info+dquot->dq_type; + uint blk, i; + struct v2_disk_dqdbheader *dh; + struct v2_disk_dqblk *ddquot; + struct v2_disk_dqblk fakedquot; + dqbuf_t buf; + + *err = 0; + if (!(buf 
= getdqbuf())) { + *err = -ENOMEM; + return 0; + } + dh = (struct v2_disk_dqdbheader *)buf; + ddquot = GETENTRIES(buf); + if (info->u.v2_i.dqi_free_entry) { + blk = info->u.v2_i.dqi_free_entry; + if ((*err = read_blk(filp, blk, buf)) < 0) + goto out_buf; + } + else { + blk = get_free_dqblk(filp, info); + if ((int)blk < 0) { + *err = blk; + return 0; + } + memset(buf, 0, V2_DQBLKSIZE); + info->u.v2_i.dqi_free_entry = blk; /* This is enough as block is already zeroed and entry list is empty... */ + mark_info_dirty(info); + } + if (le16_to_cpu(dh->dqdh_entries)+1 >= V2_DQSTRINBLK) /* Block will be full? */ + if ((*err = remove_free_dqentry(filp, info, buf, blk)) < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't remove block (%u) from entry free list.\n", blk); + goto out_buf; + } + dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)+1); + memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk)); + /* Find free structure in block */ + for (i = 0; i < V2_DQSTRINBLK && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk)); i++); +#ifdef __QUOTA_V2_PARANOIA + if (i == V2_DQSTRINBLK) { + printk(KERN_ERR "VFS: find_free_dqentry(): Data block full but it shouldn't.\n"); + *err = -EIO; + goto out_buf; + } +#endif + if ((*err = write_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota data block %u.\n", blk); + goto out_buf; + } + dquot->dq_off = (blk<dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info + dquot->dq_type; + dqbuf_t buf; + int ret = 0, newson = 0, newact = 0; + u32 *ref; + uint newblk; + + if (!(buf = getdqbuf())) + return -ENOMEM; + if (!*treeblk) { + ret = get_free_dqblk(filp, info); + if (ret < 0) + goto out_buf; + *treeblk = ret; + memset(buf, 0, V2_DQBLKSIZE); + newact = 1; + } + else { + if ((ret = read_blk(filp, *treeblk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read tree quota block %u.\n", *treeblk); + goto out_buf; + } + } + ref = (u32 *)buf; + newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); + if (!newblk) + newson = 1; + if (depth == V2_DQTREEDEPTH-1) { +#ifdef __QUOTA_V2_PARANOIA + if (newblk) { + printk(KERN_ERR "VFS: Inserting already present quota entry (block %u).\n", ref[GETIDINDEX(dquot->dq_id, depth)]); + ret = -EIO; + goto out_buf; + } +#endif + newblk = find_free_dqentry(dquot, &ret); + } + else + ret = do_insert_tree(dquot, &newblk, depth+1); + if (newson && ret >= 0) { + ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(newblk); + ret = write_blk(filp, *treeblk, buf); + } + else if (newact && ret < 0) + put_free_dqblk(filp, info, buf, *treeblk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Wrapper for inserting quota structure into tree */ +static inline int dq_insert_tree(struct dquot *dquot) +{ + int tmp = V2_DQTREEOFF; + return do_insert_tree(dquot, &tmp, 0); +} + +/* + * We don't have to be afraid of deadlocks as we never have quotas on quota files... 
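do_insert_tree() above, and the matching lookup helpers later in this file, walk a fixed-depth radix tree: each level is a block of 256 references indexed by one byte of the id, most significant byte first, which is exactly what GETIDINDEX() computes. A standalone sketch of that index calculation, assuming the usual V2_DQTREEDEPTH of 4 from the vfsv0 format headers (illustration only):

#include <stdio.h>

#define TREEDEPTH 4
#define IDINDEX(id, depth) (((id) >> ((TREEDEPTH - (depth) - 1) * 8)) & 0xff)

int main(void)
{
	unsigned int id = 1000;		/* example uid */
	int depth;

	for (depth = 0; depth < TREEDEPTH; depth++)
		printf("level %d -> slot %u\n", depth, IDINDEX(id, depth));
	return 0;
}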
+ */ +static int v2_write_dquot(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + ssize_t ret; + struct v2_disk_dqblk ddquot; + + if (!dquot->dq_off) + if ((ret = dq_insert_tree(dquot)) < 0) { + printk(KERN_ERR "VFS: Error %d occured while creating quota.\n", ret); + return ret; + } + filp = sb_dqopt(dquot->dq_sb)->files[type]; + offset = dquot->dq_off; + mem2diskdqb(&ddquot, &dquot->dq_dqb, dquot->dq_id); + fs = get_fs(); + set_fs(KERNEL_DS); + ret = filp->f_op->write(filp, (char *)&ddquot, sizeof(struct v2_disk_dqblk), &offset); + set_fs(fs); + if (ret != sizeof(struct v2_disk_dqblk)) { + printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", kdevname(dquot->dq_dev)); + if (ret >= 0) + ret = -ENOSPC; + } + else + ret = 0; + dqstats.writes++; + return ret; +} + +/* Free dquot entry in data block */ +static int free_dqentry(struct dquot *dquot, uint blk) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info + dquot->dq_type; + struct v2_disk_dqdbheader *dh; + dqbuf_t buf = getdqbuf(); + int ret = 0; + + if (!buf) + return -ENOMEM; + if (dquot->dq_off >> V2_DQBLKSIZE_BITS != blk) { + printk(KERN_ERR "VFS: Quota structure has offset to other block (%u) than it should (%u).\n", blk, (uint)(dquot->dq_off >> V2_DQBLKSIZE_BITS)); + goto out_buf; + } + if ((ret = read_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); + goto out_buf; + } + dh = (struct v2_disk_dqdbheader *)buf; + dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)-1); + if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ + if ((ret = remove_free_dqentry(filp, info, buf, blk)) < 0 || + (ret = put_free_dqblk(filp, info, buf, blk)) < 0) { + printk(KERN_ERR "VFS: Can't move quota data block (%u) to free list.\n", blk); + goto out_buf; + } + } + else { + memset(buf+(dquot->dq_off & ((1 << V2_DQBLKSIZE_BITS)-1)), 0, sizeof(struct v2_disk_dqblk)); + if (le16_to_cpu(dh->dqdh_entries) == V2_DQSTRINBLK-1) { + /* Insert will write block itself */ + if ((ret = insert_free_dqentry(filp, info, buf, blk)) < 0) { + printk(KERN_ERR "VFS: Can't insert quota data block (%u) to free entry list.\n", blk); + goto out_buf; + } + } + else + if ((ret = write_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't write quota data block %u\n", blk); + goto out_buf; + } + } + dquot->dq_off = 0; /* Quota is now unattached */ +out_buf: + freedqbuf(buf); + return ret; +} + +/* Remove reference to dquot from tree */ +static int remove_tree(struct dquot *dquot, uint *blk, int depth) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + struct mem_dqinfo *info = sb_dqopt(dquot->dq_sb)->info + dquot->dq_type; + dqbuf_t buf = getdqbuf(); + int ret = 0; + uint newblk; + u32 *ref = (u32 *)buf; + + if (!buf) + return -ENOMEM; + if ((ret = read_blk(filp, *blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); + goto out_buf; + } + newblk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); + if (depth == V2_DQTREEDEPTH-1) { + ret = free_dqentry(dquot, newblk); + newblk = 0; + } + else + ret = remove_tree(dquot, &newblk, depth+1); + if (ret >= 0 && !newblk) { + int i; + ref[GETIDINDEX(dquot->dq_id, depth)] = cpu_to_le32(0); + for (i = 0; i < V2_DQBLKSIZE && !buf[i]; i++); /* Block got empty? 
*/ + if (i == V2_DQBLKSIZE) { + put_free_dqblk(filp, info, buf, *blk); + *blk = 0; + } + else + if ((ret = write_blk(filp, *blk, buf)) < 0) + printk(KERN_ERR "VFS: Can't write quota tree block %u.\n", *blk); + } +out_buf: + freedqbuf(buf); + return ret; +} + +/* Delete dquot from tree */ +static int v2_delete_dquot(struct dquot *dquot) +{ + uint tmp = V2_DQTREEOFF; + + if (!dquot->dq_off) /* Even not allocated? */ + return 0; + return remove_tree(dquot, &tmp, 0); +} + +/* Find entry in block */ +static loff_t find_block_dqentry(struct dquot *dquot, uint blk) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + dqbuf_t buf = getdqbuf(); + loff_t ret = 0; + int i; + struct v2_disk_dqblk *ddquot = GETENTRIES(buf); + + if (!buf) + return -ENOMEM; + if ((ret = read_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + if (dquot->dq_id) + for (i = 0; i < V2_DQSTRINBLK && le32_to_cpu(ddquot[i].dqb_id) != dquot->dq_id; i++); + else { /* ID 0 as a bit more complicated searching... */ + struct v2_disk_dqblk fakedquot; + + memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk)); + for (i = 0; i < V2_DQSTRINBLK; i++) + if (!le32_to_cpu(ddquot[i].dqb_id) && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk))) + break; + } + if (i == V2_DQSTRINBLK) { + printk(KERN_ERR "VFS: Quota for id %u referenced but not present.\n", dquot->dq_id); + ret = -EIO; + goto out_buf; + } + else + ret = (blk << V2_DQBLKSIZE_BITS) + sizeof(struct v2_disk_dqdbheader) + i * sizeof(struct v2_disk_dqblk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree */ +static loff_t find_tree_dqentry(struct dquot *dquot, uint blk, int depth) +{ + struct file *filp = sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; + dqbuf_t buf = getdqbuf(); + loff_t ret = 0; + u32 *ref = (u32 *)buf; + + if (!buf) + return -ENOMEM; + if ((ret = read_blk(filp, blk, buf)) < 0) { + printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); + goto out_buf; + } + ret = 0; + blk = le32_to_cpu(ref[GETIDINDEX(dquot->dq_id, depth)]); + if (!blk) /* No reference? */ + goto out_buf; + if (depth < V2_DQTREEDEPTH-1) + ret = find_tree_dqentry(dquot, blk, depth+1); + else + ret = find_block_dqentry(dquot, blk); +out_buf: + freedqbuf(buf); + return ret; +} + +/* Find entry for given id in the tree - wrapper function */ +static inline loff_t find_dqentry(struct dquot *dquot) +{ + return find_tree_dqentry(dquot, V2_DQTREEOFF, 0); +} + +static int v2_read_dquot(struct dquot *dquot) +{ + int type = dquot->dq_type; + struct file *filp; + mm_segment_t fs; + loff_t offset; + struct v2_disk_dqblk ddquot; + int ret = 0; + + filp = sb_dqopt(dquot->dq_sb)->files[type]; + +#ifdef __QUOTA_V2_PARANOIA + if (!filp || !dquot->dq_sb) { /* Invalidated quota? */ + printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); + return -EIO; + } +#endif + offset = find_dqentry(dquot); + if (offset <= 0) { /* Entry not present? 
*/ + if (offset < 0) + printk(KERN_ERR "VFS: Can't read quota structure for id %u.\n", dquot->dq_id); + dquot->dq_off = 0; + dquot->dq_flags |= DQ_FAKE; + memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); + ret = offset; + } + else { + dquot->dq_off = offset; + fs = get_fs(); + set_fs(KERNEL_DS); + if ((ret = filp->f_op->read(filp, (char *)&ddquot, sizeof(struct v2_disk_dqblk), &offset)) != sizeof(struct v2_disk_dqblk)) { + if (ret >= 0) + ret = -EIO; + printk(KERN_ERR "VFS: Error while reading quota structure for id %u.\n", dquot->dq_id); + memset(&ddquot, 0, sizeof(struct v2_disk_dqblk)); + } + else + ret = 0; + set_fs(fs); + disk2memdqb(&dquot->dq_dqb, &ddquot); + } + dqstats.reads++; + return ret; +} + +/* Commit changes of dquot to disk - it might also mean deleting it when quota became fake one and user has no blocks... */ +static int v2_commit_dquot(struct dquot *dquot) +{ + /* We clear the flag everytime so we don't loop when there was an IO error... */ + dquot->dq_flags &= ~DQ_MOD; + if (dquot->dq_flags & DQ_FAKE && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) + return v2_delete_dquot(dquot); + else + return v2_write_dquot(dquot); +} + +static struct quota_format_ops v2_format_ops = { + check_quota_file: v2_check_quota_file, + read_file_info: v2_read_file_info, + write_file_info: v2_write_file_info, + free_file_info: NULL, + read_dqblk: v2_read_dquot, + commit_dqblk: v2_commit_dquot, +}; + +static struct quota_format_type v2_quota_format = { + qf_fmt_id: QFMT_VFS_V0, + qf_ops: &v2_format_ops, + qf_owner: THIS_MODULE +}; + +static int __init init_v2_quota_format(void) +{ + return register_quota_format(&v2_quota_format); +} + +static void __exit exit_v2_quota_format(void) +{ + unregister_quota_format(&v2_quota_format); +} + +EXPORT_NO_SYMBOLS; + +module_init(init_v2_quota_format); +module_exit(exit_v2_quota_format); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/reiserfs/buffer2.c linux.21pre4-ac2/fs/reiserfs/buffer2.c --- linux.21pre4/fs/reiserfs/buffer2.c 2003-01-29 16:26:31.000000000 +0000 +++ linux.21pre4-ac2/fs/reiserfs/buffer2.c 2003-01-06 19:17:00.000000000 +0000 @@ -51,11 +51,11 @@ struct buffer_head * reiserfs_bread (struct super_block *super, int n_block, int n_size) { struct buffer_head *result; - PROC_EXP( unsigned int ctx_switches = kstat.context_swtch ); + PROC_EXP( unsigned int ctx_switches = nr_context_switches(); ); result = bread (super -> s_dev, n_block, n_size); PROC_INFO_INC( super, breads ); - PROC_EXP( if( kstat.context_swtch != ctx_switches ) + PROC_EXP( if( nr_context_switches() != ctx_switches ) PROC_INFO_INC( super, bread_miss ) ); return result; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/reiserfs/inode.c linux.21pre4-ac2/fs/reiserfs/inode.c --- linux.21pre4/fs/reiserfs/inode.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/reiserfs/inode.c 2003-01-29 17:23:35.000000000 +0000 @@ -2127,10 +2127,11 @@ } } -static int reiserfs_direct_io(int rw, struct inode *inode, +static int reiserfs_direct_io(int rw, struct file *filp, struct kiobuf *iobuf, unsigned long blocknr, int blocksize) { + struct inode * inode = filp->f_dentry->d_inode->i_mapping->host; return generic_direct_IO(rw, inode, iobuf, blocknr, blocksize, reiserfs_get_block_direct_io) ; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/seq_file.c linux.21pre4-ac2/fs/seq_file.c --- linux.21pre4/fs/seq_file.c 2003-01-29 16:26:32.000000000 +0000 +++ 
linux.21pre4-ac2/fs/seq_file.c 2003-02-04 15:02:42.000000000 +0000 @@ -94,8 +94,10 @@ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); if (!m->buf) goto Enomem; + m->count = 0; } m->op->stop(m, p); + m->count = 0; goto Done; Fill: /* they want more? let's try to get some more */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/fs/super.c linux.21pre4-ac2/fs/super.c --- linux.21pre4/fs/super.c 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/fs/super.c 2003-01-29 17:23:41.000000000 +0000 @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -280,6 +281,8 @@ sema_init(&s->s_dquot.dqio_sem, 1); sema_init(&s->s_dquot.dqoff_sem, 1); s->s_maxbytes = MAX_NON_LFS; + s->dq_op = sb_dquot_ops; + s->s_qcop = sb_quotactl_ops; s->s_op = &empty_sops; } return s; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-alpha/bitops.h linux.21pre4-ac2/include/asm-alpha/bitops.h --- linux.21pre4/include/asm-alpha/bitops.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-alpha/bitops.h 2003-01-06 19:17:21.000000000 +0000 @@ -3,6 +3,7 @@ #include #include +#include /* * Copyright 1994, Linus Torvalds. @@ -74,6 +75,17 @@ * WARNING: non atomic version. */ static __inline__ void +__clear_bit(unsigned long nr, volatile void * addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +/* + * WARNING: non atomic version. + */ +static __inline__ void __change_bit(unsigned long nr, volatile void * addr) { int *m = ((int *) addr) + (nr >> 5); @@ -264,6 +276,28 @@ #endif } +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ +#if defined(__alpha_cix__) && defined(__alpha_fix__) + /* Whee. EV67 can calculate it directly. */ + unsigned long result; + __asm__("cttz %1,%0" : "=r"(result) : "r"(word)); + return result; +#else + unsigned long bits, qofs, bofs; + + __asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word)); + qofs = ffz_b(bits); + bits = __kernel_extbl(word, qofs); + bofs = ffz_b(~bits); + + return qofs*8 + bofs; +#endif +} + #ifdef __KERNEL__ /* @@ -365,13 +399,77 @@ } /* - * The optimizer actually does good code for this case.. + * Find next one bit in a bitmap reasonably efficiently. + */ +static inline unsigned long +find_next_bit(void * addr, unsigned long size, unsigned long offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= ~0UL << offset; + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp &= ~0UL >> (64 - size); + if (!tmp) + return result + size; +found_middle: + return result + __ffs(tmp); +} + +/* + * The optimizer actually does good code for this case. */ #define find_first_zero_bit(addr, size) \ find_next_zero_bit((addr), (size), 0) +#define find_first_bit(addr, size) \ + find_next_bit((addr), (size), 0) #ifdef __KERNEL__ +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is set. 
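find_next_bit() above is normally consumed by a loop that visits every set bit in a bitmap. The sketch below is a portable, deliberately naive stand-in with the same semantics (first set bit at or after the given offset, or size when none remains); it is not the alpha implementation and is for illustration only.

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Naive scan: index of the first set bit at or after off, or size. */
static unsigned long next_bit(const unsigned long *map, unsigned long size,
			      unsigned long off)
{
	for (; off < size; off++)
		if (map[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG)))
			return off;
	return size;
}

int main(void)
{
	unsigned long map[2] = { 0x90UL, 0x1UL };  /* bits 4, 7 and BITS_PER_LONG */
	unsigned long size = 2 * BITS_PER_LONG, bit;

	for (bit = next_bit(map, size, 0); bit < size;
	     bit = next_bit(map, size, bit + 1))
		printf("bit %lu is set\n", bit);
	return 0;
}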
+ */ +static inline unsigned long +sched_find_first_bit(unsigned long b[3]) +{ + unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; + unsigned long ofs; + + ofs = (b1 ? 64 : 128); + b1 = (b1 ? b1 : b2); + ofs = (b0 ? 0 : ofs); + b0 = (b0 ? b0 : b1); + + return __ffs(b0) + ofs; +} + + #define ext2_set_bit __test_and_set_bit #define ext2_clear_bit __test_and_clear_bit #define ext2_test_bit test_bit diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-alpha/ioctls.h linux.21pre4-ac2/include/asm-alpha/ioctls.h --- linux.21pre4/include/asm-alpha/ioctls.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-alpha/ioctls.h 2003-01-06 23:23:14.000000000 +0000 @@ -9,6 +9,7 @@ #define FIONBIO _IOW('f', 126, int) #define FIONREAD _IOR('f', 127, int) #define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) #define TIOCGETP _IOR('t', 8, struct sgttyb) #define TIOCSETP _IOW('t', 9, struct sgttyb) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-alpha/smp.h linux.21pre4-ac2/include/asm-alpha/smp.h --- linux.21pre4/include/asm-alpha/smp.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-alpha/smp.h 2003-01-06 19:17:32.000000000 +0000 @@ -55,7 +55,7 @@ #define cpu_logical_map(cpu) __cpu_logical_map[cpu] #define hard_smp_processor_id() __hard_smp_processor_id() -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) extern unsigned long cpu_present_mask; #define cpu_online_map cpu_present_mask diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-alpha/system.h linux.21pre4-ac2/include/asm-alpha/system.h --- linux.21pre4/include/asm-alpha/system.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/asm-alpha/system.h 2003-01-29 17:23:53.000000000 +0000 @@ -310,11 +310,11 @@ #define __sti() do { barrier(); setipl(IPL_MIN); } while(0) #define __save_flags(flags) ((flags) = rdps()) #define __save_and_cli(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) -#define __save_and_sti(flags) do { barrier(); (flags) = swpipl(IPL_MIN); } while(0) +#define __save_and_sti(flags) do { (flags) = swpipl(IPL_MIN); barrier(); } while(0) #define __restore_flags(flags) do { barrier(); setipl(flags); barrier(); } while(0) #define local_irq_save(flags) __save_and_cli(flags) -#define local_irq_set(flags) __save_and_sti(flags) +#define local_irq_set(flags) __save_and_sti(flags) #define local_irq_restore(flags) __restore_flags(flags) #define local_irq_disable() __cli() #define local_irq_enable() __sti() @@ -323,8 +323,6 @@ extern int global_irq_holder; -#define save_and_cli(flags) (save_flags(flags), cli()) - extern void __global_cli(void); extern void __global_sti(void); extern unsigned long __global_save_flags(void); @@ -334,6 +332,8 @@ #define sti() __global_sti() #define save_flags(flags) ((flags) = __global_save_flags()) #define restore_flags(flags) __global_restore_flags(flags) +#define save_and_cli(flags) (save_flags(flags), cli()) +#define save_and_sti(flags) (save_flags(flags), sti()) #else /* CONFIG_SMP */ @@ -341,6 +341,7 @@ #define sti() __sti() #define save_flags(flags) __save_flags(flags) #define save_and_cli(flags) __save_and_cli(flags) +#define save_and_sti(flags) __save_and_sti(flags) #define restore_flags(flags) __restore_flags(flags) #endif /* CONFIG_SMP */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-arm/page.h linux.21pre4-ac2/include/asm-arm/page.h --- 
linux.21pre4/include/asm-arm/page.h 2003-01-29 16:26:36.000000000 +0000 +++ linux.21pre4-ac2/include/asm-arm/page.h 2003-01-06 19:17:32.000000000 +0000 @@ -106,6 +106,9 @@ #define VALID_PAGE(page) ((page - mem_map) < max_mapnr) #endif +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + #endif #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-cris/ioctls.h linux.21pre4-ac2/include/asm-cris/ioctls.h --- linux.21pre4/include/asm-cris/ioctls.h 2003-01-29 16:26:38.000000000 +0000 +++ linux.21pre4-ac2/include/asm-cris/ioctls.h 2003-01-06 23:23:23.000000000 +0000 @@ -69,6 +69,7 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 #define TIOCSERSETRS485 0x5460 /* enable rs-485 */ #define TIOCSERWRRS485 0x5461 /* write rs-485 */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-cris/uaccess.h linux.21pre4-ac2/include/asm-cris/uaccess.h --- linux.21pre4/include/asm-cris/uaccess.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/asm-cris/uaccess.h 2003-01-06 17:31:08.000000000 +0000 @@ -150,25 +150,6 @@ #define __put_user(x,ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -/* - * The "xxx_ret" versions return constant specified in third argument, if - * something bad happens. These macros can be optimized for the - * case of just returning from the function xxx_ret is used. - */ - -#define put_user_ret(x,ptr,ret) \ - do { if (put_user(x,ptr)) return ret; } while (0) - -#define get_user_ret(x,ptr,ret) \ - do { if (get_user(x,ptr)) return ret; } while (0) - -#define __put_user_ret(x,ptr,ret) \ - do { if (__put_user(x,ptr)) return ret; } while (0) - -#define __get_user_ret(x,ptr,ret) \ - do { if (__get_user(x,ptr)) return ret; } while (0) - - extern long __put_user_bad(void); #define __put_user_nocheck(x,ptr,size) \ @@ -1020,11 +1001,6 @@ __constant_copy_to_user(to, from, n) : \ __generic_copy_to_user(to, from, n)) -#define copy_to_user_ret(to,from,n,retval) \ - do { if (copy_to_user(to,from,n)) return retval; } while (0) -#define copy_from_user_ret(to,from,n,retval) \ - do { if (copy_from_user(to,from,n)) return retval; } while (0) - /* We let the __ versions of copy_from/to_user inline, because they're often * used in fast paths and have only a small space overhead. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-generic/bitops.h linux.21pre4-ac2/include/asm-generic/bitops.h --- linux.21pre4/include/asm-generic/bitops.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-generic/bitops.h 2003-01-06 19:17:41.000000000 +0000 @@ -51,6 +51,12 @@ return ((mask & *addr) != 0); } +/* + * fls: find last bit set. 
+ */ + +#define fls(x) generic_fls(x) + #ifdef __KERNEL__ /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/bitops.h linux.21pre4-ac2/include/asm-i386/bitops.h --- linux.21pre4/include/asm-i386/bitops.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/bitops.h 2003-02-01 16:51:25.000000000 +0000 @@ -6,6 +6,7 @@ */ #include +#include /* * These have to be done with inline assembly: that way the bit-setting @@ -75,6 +76,14 @@ :"=m" (ADDR) :"Ir" (nr)); } + +static __inline__ void __clear_bit(int nr, volatile void * addr) +{ + __asm__ __volatile__( + "btrl %1,%0" + :"=m" (ADDR) + :"Ir" (nr)); +} #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() @@ -284,6 +293,34 @@ } /** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first set bit, not the number of the byte + * containing a bit. + */ +static __inline__ int find_first_bit(void * addr, unsigned size) +{ + int d0, d1; + int res; + + /* This looks at memory. Mark it volatile to tell gcc not to move it around */ + __asm__ __volatile__( + "xorl %%eax,%%eax\n\t" + "repe; scasl\n\t" + "jz 1f\n\t" + "leal -4(%%edi),%%edi\n\t" + "bsfl (%%edi),%%eax\n" + "1:\tsubl %%ebx,%%edi\n\t" + "shll $3,%%edi\n\t" + "addl %%edi,%%eax" + :"=a" (res), "=&c" (d0), "=&D" (d1) + :"1" ((size + 31) >> 5), "2" (addr), "b" (addr)); + return res; +} + +/** * find_next_zero_bit - find the first zero bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at @@ -296,7 +333,7 @@ if (bit) { /* - * Look for zero in first byte + * Look for zero in the first 32 bits. */ __asm__("bsfl %1,%0\n\t" "jne 1f\n\t" @@ -317,6 +354,39 @@ } /** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +static __inline__ int find_next_bit (void * addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for nonzero in the first 32 bits: + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (*p >> bit)); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + set + res); +} + +/** * ffz - find first zero in word. * @word: The word to search * @@ -330,8 +400,47 @@ return word; } +/** + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static __inline__ unsigned long __ffs(unsigned long word) +{ + __asm__("bsfl %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +/* + * fls: find last bit set. + */ + +#define fls(x) generic_fls(x) + #ifdef __KERNEL__ +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is cleared. 
+ */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + /** * ffs - find first bit set * @x: the word to search diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/byteorder.h linux.21pre4-ac2/include/asm-i386/byteorder.h --- linux.21pre4/include/asm-i386/byteorder.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/byteorder.h 2003-02-04 14:56:40.000000000 +0000 @@ -24,21 +24,41 @@ return x; } +/* gcc should generate this for open coded C now too. May be worth switching to + it because inline assembly cannot be scheduled. -AK */ static __inline__ __const__ __u16 ___arch__swab16(__u16 x) { - __asm__("xchgb %b0,%h0" /* swap bytes */ \ - : "=q" (x) \ - : "0" (x)); \ + __asm__("xchgb %b0,%h0" /* swap bytes */ + : "=q" (x) + : "0" (x)); return x; } + +static inline __u64 ___arch__swab64(__u64 val) +{ + union { + struct { __u32 a,b; } s; + __u64 u; + } v; + v.u = val; +#ifdef CONFIG_X86_BSWAP + asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" + : "=r" (v.s.a), "=r" (v.s.b) + : "0" (v.s.a), "1" (v.s.b)); +#else + v.s.a = ___arch__swab32(v.s.a); + v.s.b = ___arch__swab32(v.s.b); + asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); +#endif + return v.u; +} + +#define __arch__swab64(x) ___arch__swab64(x) #define __arch__swab32(x) ___arch__swab32(x) #define __arch__swab16(x) ___arch__swab16(x) -#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __BYTEORDER_HAS_U64__ -# define __SWAB_64_THRU_32__ -#endif +#define __BYTEORDER_HAS_U64__ #endif /* __GNUC__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/delay.h linux.21pre4-ac2/include/asm-i386/delay.h --- linux.21pre4/include/asm-i386/delay.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/delay.h 2003-01-12 01:37:16.000000000 +0000 @@ -8,13 +8,19 @@ */ extern void __bad_udelay(void); +extern void __bad_ndelay(void); extern void __udelay(unsigned long usecs); +extern void __ndelay(unsigned long usecs); extern void __const_udelay(unsigned long usecs); extern void __delay(unsigned long loops); #define udelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ __udelay(n)) + +#define ndelay(n) (__builtin_constant_p(n) ? \ + ((n) > 20000 ? 
__bad_ndelay() : __const_udelay((n) * 5ul)) : \ + __ndelay(n)) #endif /* defined(_I386_DELAY_H) */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/desc.h linux.21pre4-ac2/include/asm-i386/desc.h --- linux.21pre4/include/asm-i386/desc.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/desc.h 2003-01-06 19:17:56.000000000 +0000 @@ -18,23 +18,31 @@ * 9 - APM BIOS support * 10 - APM BIOS support * 11 - APM BIOS support + * 12 - PNPBIOS support + * 13 - PNPBIOS support + * 14 - PNPBIOS support + * 15 - PNPBIOS support + * 16 - PNPBIOS support + * 17 - not used + * 18 - not used + * 19 - not used * * The TSS+LDT descriptors are spread out a bit so that every CPU * has an exclusive cacheline for the per-CPU TSS and LDT: * - * 12 - CPU#0 TSS <-- new cacheline - * 13 - CPU#0 LDT - * 14 - not used - * 15 - not used - * 16 - CPU#1 TSS <-- new cacheline - * 17 - CPU#1 LDT - * 18 - not used - * 19 - not used + * 20 - CPU#0 TSS <-- new cacheline + * 21 - CPU#0 LDT + * 22 - not used + * 23 - not used + * 24 - CPU#1 TSS <-- new cacheline + * 25 - CPU#1 LDT + * 26 - not used + * 27 - not used * ... NR_CPUS per-CPU TSS+LDT's if on SMP * * Entry into gdt where to find first TSS. */ -#define __FIRST_TSS_ENTRY 12 +#define __FIRST_TSS_ENTRY 20 #define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1) #define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY) @@ -79,13 +87,13 @@ /* * load one particular LDT into the current CPU */ -static inline void load_LDT (struct mm_struct *mm) +static inline void load_LDT (mm_context_t *pc) { int cpu = smp_processor_id(); - void *segments = mm->context.segments; - int count = LDT_ENTRIES; + void *segments = pc->ldt; + int count = pc->size; - if (!segments) { + if (!count) { segments = &default_ldt[0]; count = 5; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/edd.h linux.21pre4-ac2/include/asm-i386/edd.h --- linux.21pre4/include/asm-i386/edd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/asm-i386/edd.h 2003-01-28 16:15:24.000000000 +0000 @@ -0,0 +1,172 @@ +/* + * linux/include/asm-i386/edd.h + * Copyright (C) 2002 Dell Computer Corporation + * by Matt Domsch + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * In a nutshell, arch/i386/boot/setup.S populates a scratch table + * in the empty_zero_block that contains a list of BIOS-enumerated + * boot devices. + * In arch/i386/kernel/setup.c, this information is + * transferred into the edd structure, and in arch/i386/kernel/edd.c, that + * information is used to identify BIOS boot disk. The code in setup.S + * is very sensitive to the size of these structures. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _ASM_I386_EDD_H +#define _ASM_I386_EDD_H + +#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF + in empty_zero_block - treat this as 1 byte */ +#define EDDBUF 0x600 /* addr of edd_info structs in empty_zero_block */ +#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */ +#define EDDEXTSIZE 4 /* change these if you muck with the structures */ +#define EDDPARMSIZE 74 +#define CHECKEXTENSIONSPRESENT 0x41 +#define GETDEVICEPARAMETERS 0x48 +#define EDDMAGIC1 0x55AA +#define EDDMAGIC2 0xAA55 + +#ifndef __ASSEMBLY__ + +#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0) +#define EDD_EXT_DEVICE_LOCKING_AND_EJECTING (1 << 1) +#define EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT (1 << 2) +#define EDD_EXT_64BIT_EXTENSIONS (1 << 3) + +#define EDD_INFO_DMA_BOUNDRY_ERROR_TRANSPARENT (1 << 0) +#define EDD_INFO_GEOMETRY_VALID (1 << 1) +#define EDD_INFO_REMOVABLE (1 << 2) +#define EDD_INFO_WRITE_VERIFY (1 << 3) +#define EDD_INFO_MEDIA_CHANGE_NOTIFICATION (1 << 4) +#define EDD_INFO_LOCKABLE (1 << 5) +#define EDD_INFO_NO_MEDIA_PRESENT (1 << 6) +#define EDD_INFO_USE_INT13_FN50 (1 << 7) + +struct edd_device_params { + u16 length; + u16 info_flags; + u32 num_default_cylinders; + u32 num_default_heads; + u32 sectors_per_track; + u64 number_of_sectors; + u16 bytes_per_sector; + u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ + u16 key; /* = 0xBEDD */ + u8 device_path_info_length; /* = 44 */ + u8 reserved2; + u16 reserved3; + u8 host_bus_type[4]; + u8 interface_type[8]; + union { + struct { + u16 base_address; + u16 reserved1; + u32 reserved2; + } __attribute__ ((packed)) isa; + struct { + u8 bus; + u8 slot; + u8 function; + u8 channel; + u32 reserved; + } __attribute__ ((packed)) pci; + /* pcix is same as pci */ + struct { + u64 reserved; + } __attribute__ ((packed)) ibnd; + struct { + u64 reserved; + } __attribute__ ((packed)) xprs; + struct { + u64 reserved; + } __attribute__ ((packed)) htpt; + struct { + u64 reserved; + } __attribute__ ((packed)) unknown; + } interface_path; + union { + struct { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) ata; + struct { + u8 device; + u8 lun; + u8 reserved1; + u8 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) atapi; + struct { + u16 id; + u64 lun; + u16 reserved1; + u32 reserved2; + } __attribute__ ((packed)) scsi; + struct { + u64 serial_number; + u64 reserved; + } __attribute__ ((packed)) usb; + struct { + u64 eui; + u64 reserved; + } __attribute__ ((packed)) i1394; + struct { + u64 wwid; + u64 lun; + } __attribute__ ((packed)) fibre; + struct { + u64 identity_tag; + u64 reserved; + } __attribute__ ((packed)) i2o; + struct { + u32 array_number; + u32 reserved1; + u64 reserved2; + } __attribute((packed)) raid; + struct { + u8 device; + u8 reserved1; + u16 reserved2; + u32 reserved3; + u64 reserved4; + } __attribute__ ((packed)) sata; + struct { + u64 reserved1; + u64 reserved2; + } __attribute__ ((packed)) unknown; + } device_path; + u8 reserved4; + u8 checksum; +} __attribute__ ((packed)); + +struct edd_info { + u8 device; + u8 version; + u16 interface_support; + struct edd_device_params params; +} __attribute__ ((packed)); + +extern struct edd_info edd[EDDMAXNR]; +extern unsigned char eddnr; +#endif /*!__ASSEMBLY__ */ + +#endif /* _ASM_I386_EDD_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/hardirq.h linux.21pre4-ac2/include/asm-i386/hardirq.h --- linux.21pre4/include/asm-i386/hardirq.h 2003-01-29 
16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/hardirq.h 2003-02-04 15:54:22.000000000 +0000 @@ -12,7 +12,11 @@ unsigned int __local_bh_count; unsigned int __syscall_count; struct task_struct * __ksoftirqd_task; /* waitqueue is too large */ + unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ +#if CONFIG_X86_LOCAL_APIC + unsigned int apic_timer_irqs; /* arch dependent */ +#endif } ____cacheline_aligned irq_cpustat_t; #include /* Standard mappings for irq_cpustat_t above */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/highmem.h linux.21pre4-ac2/include/asm-i386/highmem.h --- linux.21pre4/include/asm-i386/highmem.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/highmem.h 2003-02-04 15:54:22.000000000 +0000 @@ -46,7 +46,7 @@ * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ -#define PKMAP_BASE (0xfe000000UL) +#define PKMAP_BASE (0xff800000UL) #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/hw_irq.h linux.21pre4-ac2/include/asm-i386/hw_irq.h --- linux.21pre4/include/asm-i386/hw_irq.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/hw_irq.h 2003-02-04 15:54:22.000000000 +0000 @@ -13,8 +13,10 @@ */ #include +#include #include #include +#include /* * IDT vectors usable for external interrupt sources start diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/ioctls.h linux.21pre4-ac2/include/asm-i386/ioctls.h --- linux.21pre4/include/asm-i386/ioctls.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/ioctls.h 2003-01-06 23:23:33.000000000 +0000 @@ -67,6 +67,7 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/mmu_context.h linux.21pre4-ac2/include/asm-i386/mmu_context.h --- linux.21pre4/include/asm-i386/mmu_context.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/mmu_context.h 2003-02-04 15:54:23.000000000 +0000 @@ -7,10 +7,12 @@ #include /* - * possibly do the LDT unload here? + * hooks to add arch specific data into the mm struct. + * Note that destroy_context is called even if init_new_context + * fails. */ -#define destroy_context(mm) do { } while(0) -#define init_new_context(tsk,mm) 0 +int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +void destroy_context(struct mm_struct *mm); #ifdef CONFIG_SMP @@ -27,22 +29,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu) { - if (prev != next) { + if (likely(prev != next)) { /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); - /* - * Re-load LDT if necessary - */ - if (prev->context.segments != next->context.segments) - load_LDT(next); #ifdef CONFIG_SMP cpu_tlbstate[cpu].state = TLBSTATE_OK; cpu_tlbstate[cpu].active_mm = next; #endif set_bit(cpu, &next->cpu_vm_mask); - set_bit(cpu, &next->context.cpuvalid); /* Re-load page tables */ load_cr3(next->pgd); + /* load_LDT, if either the previous or next thread + * has a non-default LDT. 
+ */ + if (unlikely(next->context.size+prev->context.size)) + load_LDT(&next->context); } #ifdef CONFIG_SMP else { @@ -54,9 +55,8 @@ * tlb flush IPI delivery. We must reload %cr3. */ load_cr3(next->pgd); + load_LDT(&next->context); } - if (!test_and_set_bit(cpu, &next->context.cpuvalid)) - load_LDT(next); } #endif } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/mmu.h linux.21pre4-ac2/include/asm-i386/mmu.h --- linux.21pre4/include/asm-i386/mmu.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/mmu.h 2003-01-06 19:18:12.000000000 +0000 @@ -4,10 +4,13 @@ /* * The i386 doesn't have a mmu context, but * we put the segment information here. + * + * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { - void *segments; - unsigned long cpuvalid; + int size; + struct semaphore sem; + void * ldt; } mm_context_t; #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/pgalloc.h linux.21pre4-ac2/include/asm-i386/pgalloc.h --- linux.21pre4/include/asm-i386/pgalloc.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/pgalloc.h 2003-02-04 15:54:22.000000000 +0000 @@ -224,7 +224,7 @@ { struct mm_struct *active_mm; int state; -}; +} ____cacheline_aligned; extern struct tlb_state cpu_tlbstate[NR_CPUS]; #endif /* CONFIG_SMP */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/processor.h linux.21pre4-ac2/include/asm-i386/processor.h --- linux.21pre4/include/asm-i386/processor.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/processor.h 2003-02-01 16:51:26.000000000 +0000 @@ -435,9 +435,12 @@ */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); -/* Copy and release all segment info associated with a VM */ -extern void copy_segments(struct task_struct *p, struct mm_struct * mm); -extern void release_segments(struct mm_struct * mm); +/* Copy and release all segment info associated with a VM + * Unusable due to lack of error handling, use {init_new,destroy}_context + * instead. + */ +static inline void copy_segments(struct task_struct *p, struct mm_struct * mm) { } +static inline void release_segments(struct mm_struct * mm) { } /* * Return saved PC of a blocked thread. 
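
Editor's note: the asm-i386/mmu.h and mmu_context.h hunks above replace the old segments/cpuvalid pair with an mm_context_t that carries an LDT pointer plus its entry count, so switch_mm() only touches the LDT when the outgoing or incoming mm actually owns a private one. The stand-alone sketch below models just that fast path; context_t, load_ldt_stub and switch_context are invented names used only for illustration and are not kernel symbols.

#include <stdio.h>

/* Simplified stand-in for the new i386 mm_context_t: a private LDT
 * pointer plus the number of entries in it (0 means "default LDT"). */
struct context_t {
    int size;
    void *ldt;
};

/* Hypothetical stand-in for load_LDT(): picks the private table when
 * one exists, otherwise falls back to the small default table. */
static void load_ldt_stub(const struct context_t *pc)
{
    if (pc->size)
        printf("loading private LDT (%d entries)\n", pc->size);
    else
        printf("loading default LDT\n");
}

/* Models the switch_mm() fast path from the patch: the LDT is reloaded
 * only if either the previous or the next context has a non-default
 * LDT; the common case skips the slow descriptor reload entirely. */
static void switch_context(const struct context_t *prev,
                           const struct context_t *next)
{
    if (prev->size + next->size)
        load_ldt_stub(next);
    else
        printf("both contexts use the default LDT - reload skipped\n");
}

int main(void)
{
    struct context_t plain = { 0, NULL };
    struct context_t wine  = { 32, (void *)&plain }; /* e.g. a Wine/DOSEMU-style user */

    switch_context(&plain, &plain); /* skipped */
    switch_context(&plain, &wine);  /* reload */
    switch_context(&wine, &plain);  /* reload back to default */
    return 0;
}

The prev->size + next->size test mirrors the trick in the patch: one addition replaces two comparisons, and the common case (both zero) falls straight through.
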
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/smpboot.h linux.21pre4-ac2/include/asm-i386/smpboot.h --- linux.21pre4/include/asm-i386/smpboot.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/smpboot.h 2003-01-31 11:10:33.000000000 +0000 @@ -30,6 +30,15 @@ /*Start cyclone clock*/ cyclone_setup(0); } + else if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "RUTHLESS SMP", 9)){ + clustered_apic_mode = CLUSTERED_APIC_XAPIC; + apic_broadcast_id = APIC_BROADCAST_ID_XAPIC; + int_dest_addr_mode = APIC_DEST_PHYSICAL; + int_delivery_mode = dest_Fixed; + esr_disable = 1; + /*Start cyclone clock*/ + cyclone_setup(0); + } else if (!strncmp(oem, "IBM NUMA", 8)){ clustered_apic_mode = CLUSTERED_APIC_NUMAQ; apic_broadcast_id = APIC_BROADCAST_ID_APIC; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/smp.h linux.21pre4-ac2/include/asm-i386/smp.h --- linux.21pre4/include/asm-i386/smp.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/smp.h 2003-02-04 15:54:22.000000000 +0000 @@ -40,6 +40,7 @@ extern void smp_flush_tlb(void); extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); extern void smp_send_reschedule(int cpu); +extern void smp_send_reschedule_all(void); extern void smp_invalidate_rcv(void); /* Process an NMI */ extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); @@ -58,6 +59,8 @@ return cpu; } +#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) + /* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. @@ -81,7 +84,7 @@ * so this is correct in the x86 case. */ -#define smp_processor_id() (current->processor) +#define smp_processor_id() (current->cpu) static __inline int hard_smp_processor_id(void) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-i386/system.h linux.21pre4-ac2/include/asm-i386/system.h --- linux.21pre4/include/asm-i386/system.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/asm-i386/system.h 2003-02-04 15:54:22.000000000 +0000 @@ -12,25 +12,22 @@ struct task_struct; /* one of the stranger aspects of C forward declarations.. 
*/ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); -#define prepare_to_switch() do { } while(0) #define switch_to(prev,next,last) do { \ asm volatile("pushl %%esi\n\t" \ "pushl %%edi\n\t" \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %3,%%esp\n\t" /* restore ESP */ \ + "movl %2,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %4\n\t" /* restore EIP */ \ + "pushl %3\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popl %%edi\n\t" \ "popl %%esi\n\t" \ - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ - "=b" (last) \ + :"=m" (prev->thread.esp),"=m" (prev->thread.eip) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ - "a" (prev), "d" (next), \ - "b" (prev)); \ + "a" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ @@ -322,18 +319,19 @@ /* used in the idle loop; sti takes one instruction cycle to complete */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") -#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); -#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); +#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); +#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); /* For spinlocks etc */ #if 0 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") -#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory") -#else -#define local_irq_save(x) __save_and_cli(x) -#define local_irq_set(x) __save_and_sti(x) +#define local_irq_set(x) __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory") +#else +#define local_irq_save(x) __save_and_cli(x) +#define local_irq_set(x) __save_and_sti(x) #endif + #define local_irq_restore(x) __restore_flags(x) #define local_irq_disable() __cli() #define local_irq_enable() __sti() @@ -348,8 +346,8 @@ #define sti() __global_sti() #define save_flags(x) ((x)=__global_save_flags()) #define restore_flags(x) __global_restore_flags(x) -#define save_and_cli(x) do { save_flags(x); cli(); } while(0); -#define save_and_sti(x) do { save_flags(x); sti(); } while(0); +#define save_and_cli(x) do { save_flags(x); cli(); } while(0) +#define save_and_sti(x) do { save_flags(x); sti(); } while(0) #else diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-ia64/ioctls.h linux.21pre4-ac2/include/asm-ia64/ioctls.h --- linux.21pre4/include/asm-ia64/ioctls.h 2003-01-29 16:26:37.000000000 +0000 +++ linux.21pre4-ac2/include/asm-ia64/ioctls.h 2003-01-06 23:23:46.000000000 +0000 @@ -72,6 +72,7 @@ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-m68k/ioctls.h linux.21pre4-ac2/include/asm-m68k/ioctls.h --- linux.21pre4/include/asm-m68k/ioctls.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-m68k/ioctls.h 2003-01-06 23:23:55.000000000 +0000 @@ -65,6 +65,7 @@ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define FIOQSIZE 0x545E /* Used for packet mode */ #define TIOCPKT_DATA 0 
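
Editor's note: several architectures above gain a FIOQSIZE ioctl that reports a file's size through a loff_t. A minimal user-space check might look like the sketch below; the 0x5460 fallback is the i386/cris value from these hunks (other ports encode it differently, e.g. _IOR('f', 128, loff_t) on alpha/sparc and 0x545E on m68k), and whether a given kernel and file type answer it at all depends on this patch being present.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef FIOQSIZE
/* Fallback: the i386/cris value used in this patch; other architectures
 * encode FIOQSIZE differently (see the per-arch ioctls.h hunks above). */
#define FIOQSIZE 0x5460
#endif

int main(int argc, char **argv)
{
    long long size = 0;    /* the ioctl writes a loff_t */
    int fd;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, FIOQSIZE, &size) < 0)
        perror("ioctl(FIOQSIZE)");  /* not every file type or kernel supports it */
    else
        printf("%s: %lld bytes\n", argv[1], size);
    close(fd);
    return 0;
}
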
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-m68k/machdep.h linux.21pre4-ac2/include/asm-m68k/machdep.h --- linux.21pre4/include/asm-m68k/machdep.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-m68k/machdep.h 2003-01-28 16:35:25.000000000 +0000 @@ -5,6 +5,7 @@ struct kbd_repeat; struct mktime; struct rtc_time; +struct rtc_pll_info; struct gendisk; struct buffer_head; @@ -29,6 +30,9 @@ extern void (*mach_gettod)(int *year, int *mon, int *day, int *hour, int *min, int *sec); extern int (*mach_hwclk)(int, struct rtc_time*); +extern unsigned int (*mach_get_ss)(void); +extern int (*mach_get_rtc_pll)(struct rtc_pll_info *); +extern int (*mach_set_rtc_pll)(struct rtc_pll_info *); extern int (*mach_set_clock_mmss)(unsigned long); extern void (*mach_reset)( void ); extern void (*mach_halt)( void ); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-m68k/rtc.h linux.21pre4-ac2/include/asm-m68k/rtc.h --- linux.21pre4/include/asm-m68k/rtc.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-m68k/rtc.h 2003-01-28 16:35:25.000000000 +0000 @@ -13,24 +13,64 @@ #ifdef __KERNEL__ -#include #include -#include +#include #include #define RTC_PIE 0x40 /* periodic interrupt enable */ #define RTC_AIE 0x20 /* alarm interrupt enable */ #define RTC_UIE 0x10 /* update-finished interrupt enable */ -extern void gen_rtc_interrupt(unsigned long); - /* some dummy definitions */ +#define RTC_BATT_BAD 0x100 /* battery bad */ #define RTC_SQWE 0x08 /* enable square-wave output */ #define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ #define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ #define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ - +static inline unsigned int get_rtc_time(struct rtc_time *time) +{ + /* + * Only the values that we read from the RTC are set. We leave + * tm_wday, tm_yday and tm_isdst untouched. Even though the + * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated + * by the RTC when initially set to a non-zero value. 
+ */ + mach_hwclk(0, time); + return RTC_24H; +} + +static inline int set_rtc_time(struct rtc_time *time) +{ + return mach_hwclk(1, time); +} + +static inline unsigned int get_rtc_ss(void) +{ + if (mach_get_ss) + return mach_get_ss(); + else{ + struct rtc_time h; + + get_rtc_time(&h); + return h.tm_sec; + } +} + +static inline int get_rtc_pll(struct rtc_pll_info *pll) +{ + if (mach_get_rtc_pll) + return mach_get_rtc_pll(pll); + else + return -EINVAL; +} +static inline int set_rtc_pll(struct rtc_pll_info *pll) +{ + if (mach_set_rtc_pll) + return mach_set_rtc_pll(pll); + else + return -EINVAL; +} #endif /* __KERNEL__ */ #endif /* _ASM__RTC_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-mips/sibyte/swarm_ide.h linux.21pre4-ac2/include/asm-mips/sibyte/swarm_ide.h --- linux.21pre4/include/asm-mips/sibyte/swarm_ide.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-mips/sibyte/swarm_ide.h 2003-01-06 19:19:26.000000000 +0000 @@ -25,8 +25,23 @@ #define SWARM_IDE_REG(pcaddr) (SWARM_IDE_BASE + ((pcaddr)<<5)) #define SWARM_IDE_INT (K_INT_GPIO_4) -extern ide_ideproc_t swarm_ideproc; +#ifdef __IDE_SWARM_C +static inline void swarm_outb (u8 val, unsigned long port) +{ + *(volatile u8 *)(mips_io_port_base + (port)) = val; +} + +static inline void swarm_outw (u16 val, unsigned long port) +{ + *(volatile u16 *)(mips_io_port_base + (port)) = val; +} + +static inline void swarm_outl (u32 val, unsigned long port) +{ + *(volatile u32 *)(mips_io_port_base + (port)) = val; +} +#else /* !__IDE_SWARM_C */ #define swarm_outb(val,port) \ do { \ *(volatile u8 *)(mips_io_port_base + (port)) = val; \ @@ -41,6 +56,7 @@ do { \ *(volatile u32 *)(mips_io_port_base + (port)) = val;\ } while(0) +#endif /* __IDE_SWARM_C */ static inline unsigned char swarm_inb(unsigned long port) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-mips64/sibyte/swarm_ide.h linux.21pre4-ac2/include/asm-mips64/sibyte/swarm_ide.h --- linux.21pre4/include/asm-mips64/sibyte/swarm_ide.h 2003-01-29 16:26:38.000000000 +0000 +++ linux.21pre4-ac2/include/asm-mips64/sibyte/swarm_ide.h 2003-01-06 17:32:06.000000000 +0000 @@ -25,8 +25,22 @@ #define SWARM_IDE_REG(pcaddr) (SWARM_IDE_BASE + ((pcaddr)<<5)) #define SWARM_IDE_INT (K_INT_GPIO_4) -extern ide_ideproc_t swarm_ideproc; +#ifdef __IDE_SWARM_C +static inline void swarm_outb (u8 val, unsigned long port) +{ + *(volatile u8 *)(mips_io_port_base + (port)) = val; +} + +static inline void swarm_outw (u16 val, unsigned long port) +{ + *(volatile u16 *)(mips_io_port_base + (port)) = val; +} +static inline void swarm_outl (u32 val, unsigned long port) +{ + *(volatile u32 *)(mips_io_port_base + (port)) = val; +} +#else /* !__IDE_SWARM_C */ #define swarm_outb(val,port) \ do { \ *(volatile u8 *)(mips_io_port_base + (port)) = val; \ @@ -41,6 +55,7 @@ do { \ *(volatile u32 *)(mips_io_port_base + (port)) = val;\ } while(0) +#endif /* __IDE_SWARM_C */ static inline unsigned char swarm_inb(unsigned long port) { @@ -57,7 +72,6 @@ return (*(volatile u32 *)(mips_io_port_base + port)); } - static inline void swarm_outsb(unsigned long port, void *addr, unsigned int count) { while (count--) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-parisc/rtc.h linux.21pre4-ac2/include/asm-parisc/rtc.h --- linux.21pre4/include/asm-parisc/rtc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/asm-parisc/rtc.h 2003-01-28 16:35:25.000000000 +0000 @@ -0,0 +1,132 @@ +/* 
include/asm-parisc/rtc.h */ + +#ifndef _ASM_RTC_H +#define _ASM_RTC_H + +#ifdef __KERNEL__ + +#include +#include + +#define RTC_PIE 0x40 /* periodic interrupt enable */ +#define RTC_AIE 0x20 /* alarm interrupt enable */ +#define RTC_UIE 0x10 /* update-finished interrupt enable */ + +#define RTC_BATT_BAD 0x100 /* battery bad */ + +/* some dummy definitions */ +#define RTC_SQWE 0x08 /* enable square-wave output */ +#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ + + +/* constants for calculation */ +#define SECS_PER_HOUR (60 * 60) +#define SECS_PER_DAY (SECS_PER_HOUR * 24) + +#define __isleap(year) \ + ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) + + +/* How many days come before each month (0-12). */ +static const unsigned short int __mon_yday[2][13] = +{ + /* Normal years. */ + { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + /* Leap years. */ + { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } +}; + +static inline unsigned int get_rtc_time(struct rtc_time *wtime) +{ + /* + * Only the values that we read from the RTC are set. We leave + * tm_wday, tm_yday and tm_isdst untouched. Even though the + * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated + * by the RTC when initially set to a non-zero value. + */ + + struct pdc_tod tod_data; + long int days, rem, y; + const unsigned short int *ip; + + if (pdc_tod_read(&tod_data) < 0) + return (RTC_24H | RTC_BATT_BAD); + + + // most of the remainder of this function is: + // Copyright (C) 1991, 1993, 1997, 1998 Free Software Foundation, Inc. + // This was originally a part of the GNU C Library. + // It is distributed under the GPL, and was swiped from offtime.c + + + days = tod_data.tod_sec / SECS_PER_DAY; + rem = tod_data.tod_sec % SECS_PER_DAY; + + wtime->tm_hour = rem / SECS_PER_HOUR; + rem %= SECS_PER_HOUR; + wtime->tm_min = rem / 60; + wtime->tm_sec = rem % 60; + + y = 1970; + +#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) +#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) + + while (days < 0 || days >= (__isleap (y) ? 366 : 365)) + { + /* Guess a corrected year, assuming 365 days per year. */ + long int yg = y + days / 365 - (days % 365 < 0); + + /* Adjust DAYS and Y to match the guessed year. 
*/ + days -= ((yg - y) * 365 + + LEAPS_THRU_END_OF (yg - 1) + - LEAPS_THRU_END_OF (y - 1)); + y = yg; + } + wtime->tm_year = y - 1900; +#undef DIV +#undef LEAPS_THRU_END_OF + + ip = __mon_yday[__isleap(y)]; + for (y = 11; days < (long int) ip[y]; --y) + continue; + days -= ip[y]; + wtime->tm_mon = y; + wtime->tm_mday = days + 1; + + return (RTC_24H); +} + +static inline int set_rtc_time(struct rtc_time *wtime) +{ + u_int32_t secs; + + secs = mktime(wtime->tm_year + 1900, wtime->tm_mon + 1, wtime->tm_mday, + wtime->tm_hour, wtime->tm_min, wtime->tm_sec); + + if (pdc_tod_set(secs, 0) < 0) + return -EINVAL; + else + return 0; +} + +static inline unsigned int get_rtc_ss(void) +{ + return -EINVAL; +} + +static inline int get_rtc_pll(struct rtc_pll_info *pll) +{ + return -EINVAL; +} +static inline int set_rtc_pll(struct rtc_pll_info *pll) +{ + return -EINVAL; +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM__RTC_H */ + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-ppc/bitops.h linux.21pre4-ac2/include/asm-ppc/bitops.h --- linux.21pre4/include/asm-ppc/bitops.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/asm-ppc/bitops.h 2003-01-07 15:10:36.000000000 +0000 @@ -10,6 +10,7 @@ #define _PPC_BITOPS_H #include +#include #include /* @@ -237,6 +238,13 @@ return __ilog2(x & -x); } +#ifdef __KERNEL__ + +static inline int __ffs(unsigned long x) +{ + return __ilog2(x & -x); +} + /* * ffs: find first bit set. This is defined the same way as * the libc and compiler builtin ffs routines, therefore @@ -248,6 +256,18 @@ } /* + * fls: find last (most-significant) bit set. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static __inline__ int fls(unsigned int x) +{ + int lz; + + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); + return 32 - lz; +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ @@ -256,6 +276,25 @@ #define hweight16(x) generic_hweight16(x) #define hweight8(x) generic_hweight8(x) +#endif /* __KERNEL__ */ + +/* + * Find the first bit set in a 140-bit bitmap. + * The first 100 bits are unlikely to be set. + */ +static inline int _sched_find_first_bit(unsigned long *b) +{ + if (unlikely(b[0])) + return __ffs(b[0]); + if (unlikely(b[1])) + return __ffs(b[1]) + 32; + if (unlikely(b[2])) + return __ffs(b[2]) + 64; + if (b[3]) + return __ffs(b[3]) + 96; + return __ffs(b[4]) + 128; +} + /* * This implementation of find_{first,next}_zero_bit was stolen from * Linus' asm-alpha/bitops.h. @@ -302,6 +341,8 @@ } +#ifdef __KERNEL__ + #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, addr) #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, addr) @@ -365,5 +406,7 @@ #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) +#endif /* __KERNEL__ */ + #endif /* _PPC_BITOPS_H */ #endif /* __KERNEL__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-ppc/rtc.h linux.21pre4-ac2/include/asm-ppc/rtc.h --- linux.21pre4/include/asm-ppc/rtc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/asm-ppc/rtc.h 2003-02-04 14:58:59.000000000 +0000 @@ -0,0 +1,98 @@ +/* + * inclue/asm-ppc/rtc.h + * + * Author: Tom Rini + * + * 2002 (c) MontaVista, Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ * + * Based on: + * include/asm-m68k/rtc.h + * + * Copyright Richard Zidlicky + * implementation details for genrtc/q40rtc driver + * + * And the old drivers/macintosh/rtc.c which was heavily based on: + * Linux/SPARC Real Time Clock Driver + * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) + * + * With additional work by Paul Mackerras and Franz Sirl. + */ + +#ifndef __ASM_RTC_H__ +#define __ASM_RTC_H__ + +#ifdef __KERNEL__ + +#include + +#include +#include + +#define RTC_PIE 0x40 /* periodic interrupt enable */ +#define RTC_AIE 0x20 /* alarm interrupt enable */ +#define RTC_UIE 0x10 /* update-finished interrupt enable */ + +extern void gen_rtc_interrupt(unsigned long); + +/* some dummy definitions */ +#define RTC_BATT_BAD 0x100 /* battery bad */ +#define RTC_SQWE 0x08 /* enable square-wave output */ +#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ + +static inline unsigned int get_rtc_time(struct rtc_time *time) +{ + if (ppc_md.get_rtc_time) { + unsigned long nowtime; + + nowtime = (ppc_md.get_rtc_time)(); + + to_tm(nowtime, time); + + time->tm_year -= 1900; + time->tm_mon -= 1; /* Make sure userland has a 0-based month */ + } + + return RTC_24H; +} + +/* Set the current date and time in the real time clock. */ +static inline int set_rtc_time(struct rtc_time *time) +{ + if (ppc_md.get_rtc_time) { + unsigned long nowtime; + + nowtime = mktime(time->tm_year+1900, time->tm_mon+1, + time->tm_mday, time->tm_hour, time->tm_min, + time->tm_sec); + + (ppc_md.set_rtc_time)(nowtime); + + return 0; + } else + return -EINVAL; +} + +static inline unsigned int get_rtc_ss(void) +{ + struct rtc_time h; + + get_rtc_time(&h); + return h.tm_sec; +} + +static inline int get_rtc_pll(struct rtc_pll_info *pll) +{ + return -EINVAL; +} +static inline int set_rtc_pll(struct rtc_pll_info *pll) +{ + return -EINVAL; +} + +#endif /* __KERNEL__ */ +#endif /* __ASM_RTC_H__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-sparc/ioctls.h linux.21pre4-ac2/include/asm-sparc/ioctls.h --- linux.21pre4/include/asm-sparc/ioctls.h 2003-01-29 16:26:35.000000000 +0000 +++ linux.21pre4-ac2/include/asm-sparc/ioctls.h 2003-01-06 23:24:08.000000000 +0000 @@ -86,6 +86,7 @@ #define FIONBIO _IOW('f', 126, int) #define FIONREAD _IOR('f', 127, int) #define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) /* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it * someday. This is completely bogus, I know... diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-sparc64/ioctls.h linux.21pre4-ac2/include/asm-sparc64/ioctls.h --- linux.21pre4/include/asm-sparc64/ioctls.h 2003-01-29 16:26:36.000000000 +0000 +++ linux.21pre4-ac2/include/asm-sparc64/ioctls.h 2003-01-06 23:24:17.000000000 +0000 @@ -87,6 +87,7 @@ #define FIONBIO _IOW('f', 126, int) #define FIONREAD _IOR('f', 127, int) #define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) /* SCARY Rutgers local SunOS kernel hackery, perhaps I will support it * someday. This is completely bogus, I know... 
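
Editor's note: alpha, i386 and ppc each gain an __ffs()/sched_find_first_bit() pair above for scanning the O(1) scheduler's 140-bit priority bitmap, whose first ~100 bits are usually clear. The sketch below is only a portable reference model of what those helpers compute (the index of the first set bit); the real versions use cttz, bsfl and cntlzw respectively, and the uint32_t word layout here is an assumption made for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Reference model of the 140-bit priority bitmap: 5 x 32-bit words
 * here, whereas alpha packs it into 3 x 64-bit words. */
#define MAX_PRIO     140
#define BITMAP_WORDS ((MAX_PRIO + 31) / 32)

/* Naive bit-by-bit scan; the per-arch helpers in the patch return the
 * same index in a handful of instructions. */
static int naive_find_first_bit(const uint32_t *b)
{
    int i;

    for (i = 0; i < MAX_PRIO; i++)
        if (b[i / 32] & (1U << (i % 32)))
            return i;
    return MAX_PRIO;    /* the scheduler guarantees at least one bit is set */
}

int main(void)
{
    uint32_t bitmap[BITMAP_WORDS];

    memset(bitmap, 0, sizeof(bitmap));
    bitmap[3] |= 1U << 7;   /* pretend priority 103 has a runnable task */
    bitmap[4] |= 1U << 3;   /* and priority 131 as well */

    printf("highest runnable priority: %d\n", naive_find_first_bit(bitmap));
    return 0;
}
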
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/asm-x86_64/system.h linux.21pre4-ac2/include/asm-x86_64/system.h --- linux.21pre4/include/asm-x86_64/system.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/asm-x86_64/system.h 2003-02-04 14:55:29.000000000 +0000 @@ -246,8 +246,8 @@ /* used in the idle loop; sti takes one instruction cycle to complete */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") -#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0); -#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0); +#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0) +#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0) /* For spinlocks etc */ #define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0) @@ -266,8 +266,8 @@ #define sti() __global_sti() #define save_flags(x) ((x)=__global_save_flags()) #define restore_flags(x) __global_restore_flags(x) -#define save_and_cli(x) do { save_flags(x); cli(); } while(0); -#define save_and_sti(x) do { save_flags(x); sti(); } while(0); +#define save_and_cli(x) do { save_flags(x); cli(); } while(0) +#define save_and_sti(x) do { save_flags(x); sti(); } while(0) #else diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/acct.h linux.21pre4-ac2/include/linux/acct.h --- linux.21pre4/include/linux/acct.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/acct.h 2003-02-01 16:51:25.000000000 +0000 @@ -56,7 +56,9 @@ comp_t ac_swaps; /* Accounting Number of Swaps */ __u32 ac_exitcode; /* Accounting Exitcode */ char ac_comm[ACCT_COMM + 1]; /* Accounting Command Name */ - char ac_pad[10]; /* Accounting Padding Bytes */ + char ac_pad[2]; /* Accounting Padding Bytes */ + __u32 ac_uid32; /* 32 bit UID */ + __u32 ac_gid32; /* 32 bit GID */ }; /* diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/agp_backend.h linux.21pre4-ac2/include/linux/agp_backend.h --- linux.21pre4/include/linux/agp_backend.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/agp_backend.h 2003-01-06 15:38:09.000000000 +0000 @@ -60,7 +60,6 @@ VIA_APOLLO_PRO, VIA_APOLLO_KX133, VIA_APOLLO_KT133, - VIA_APOLLO_P4X400, VIA_APOLLO_KT400, SIS_GENERIC, AMD_GENERIC, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/binfmts.h linux.21pre4-ac2/include/linux/binfmts.h --- linux.21pre4/include/linux/binfmts.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/binfmts.h 2003-02-04 15:54:22.000000000 +0000 @@ -16,6 +16,8 @@ #ifdef __KERNEL__ +struct file; + /* * This structure is used to hold the arguments that are used when loading binaries. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/bitops.h linux.21pre4-ac2/include/linux/bitops.h --- linux.21pre4/include/linux/bitops.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/bitops.h 2003-02-01 16:51:25.000000000 +0000 @@ -1,6 +1,6 @@ #ifndef _LINUX_BITOPS_H #define _LINUX_BITOPS_H - +#include /* * ffs: find first bit set. This is defined the same way as @@ -38,6 +38,47 @@ } /* + * fls: find last bit set. 
+ */ + +extern __inline__ int generic_fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000)) { + x <<= 1; + r -= 1; + } + return r; +} + +extern __inline__ int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +/* * hweightN: returns the hamming weight (i.e. the number * of bits set) of a N-bit word */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/cpufreq.h linux.21pre4-ac2/include/linux/cpufreq.h --- linux.21pre4/include/linux/cpufreq.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/cpufreq.h 2003-02-01 16:51:25.000000000 +0000 @@ -0,0 +1,267 @@ +/* + * linux/include/linux/cpufreq.h + * + * Copyright (C) 2001 Russell King + * (C) 2002 Dominik Brodowski + * + * + * $Id: cpufreq.h,v 1.32 2003/01/02 22:16:27 db Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _LINUX_CPUFREQ_H +#define _LINUX_CPUFREQ_H + +#include +#include +#include + + +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ + +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); + +#define CPUFREQ_TRANSITION_NOTIFIER (0) +#define CPUFREQ_POLICY_NOTIFIER (1) + +#define CPUFREQ_ALL_CPUS ((NR_CPUS)) + + +/********************** cpufreq policy notifiers *********************/ + +#define CPUFREQ_POLICY_POWERSAVE (1) +#define CPUFREQ_POLICY_PERFORMANCE (2) + +/* Frequency values here are CPU kHz so that hardware which doesn't run + * with some frequencies can complain without having to guess what per + * cent / per mille means. + * Maximum transition latency is in nanoseconds - if it's unknown, + * CPUFREQ_ETERNAL shall be used. + */ + +#define CPUFREQ_ETERNAL (-1) +struct cpufreq_cpuinfo { + unsigned int max_freq; + unsigned int min_freq; + unsigned int transition_latency; +}; + +struct cpufreq_policy { + unsigned int cpu; /* cpu nr or CPUFREQ_ALL_CPUS */ + unsigned int min; /* in kHz */ + unsigned int max; /* in kHz */ + unsigned int policy; /* see above */ + struct cpufreq_cpuinfo cpuinfo; /* see above */ +}; + +#define CPUFREQ_ADJUST (0) +#define CPUFREQ_INCOMPATIBLE (1) +#define CPUFREQ_NOTIFY (2) + + +/******************** cpufreq transition notifiers *******************/ + +#define CPUFREQ_PRECHANGE (0) +#define CPUFREQ_POSTCHANGE (1) + +struct cpufreq_freqs { + unsigned int cpu; /* cpu nr or CPUFREQ_ALL_CPUS */ + unsigned int old; + unsigned int new; +}; + + +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * Needed for loops_per_jiffy and similar calculations. We do it + * this way to avoid math overflow on 32-bit machines. This will + * become architecture dependent once high-resolution-timer is + * merged (or any other thing that introduces sc_math.h). 
+ * + * new = old * mult / div + */ +static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) +{ + unsigned long val, carry; + + mult /= 100; + div /= 100; + val = (old / div) * mult; + carry = old % div; + carry = carry * mult / div; + + return carry + val; +}; + + +/********************************************************************* + * CPUFREQ DRIVER INTERFACE * + *********************************************************************/ + +struct cpufreq_driver { + /* needed by all drivers */ + int (*verify) (struct cpufreq_policy *policy); + int (*setpolicy) (struct cpufreq_policy *policy); + struct cpufreq_policy *policy; + /* 2.4. compatible API */ +#ifdef CONFIG_CPU_FREQ_24_API + unsigned int cpu_cur_freq[NR_CPUS]; +#endif +}; + +int cpufreq_register(struct cpufreq_driver *driver_data); +int cpufreq_unregister(void); + +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); + + +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) +{ + if (policy->min < min) + policy->min = min; + if (policy->max < min) + policy->max = min; + if (policy->min > max) + policy->min = max; + if (policy->max > max) + policy->max = max; + if (policy->min > policy->max) + policy->min = policy->max; + return; +} + +/********************************************************************* + * CPUFREQ 2.6. INTERFACE * + *********************************************************************/ +int cpufreq_set_policy(struct cpufreq_policy *policy); +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); + +#ifdef CONFIG_PM +int cpufreq_restore(void); +#endif + + +#ifdef CONFIG_CPU_FREQ_24_API +/********************************************************************* + * CPUFREQ 2.4. 
INTERFACE * + *********************************************************************/ +int cpufreq_setmax(unsigned int cpu); +int cpufreq_set(unsigned int kHz, unsigned int cpu); +unsigned int cpufreq_get(unsigned int cpu); + +/* /proc/sys/cpu */ +enum { + CPU_NR = 1, /* compatibilty reasons */ + CPU_NR_0 = 1, + CPU_NR_1 = 2, + CPU_NR_2 = 3, + CPU_NR_3 = 4, + CPU_NR_4 = 5, + CPU_NR_5 = 6, + CPU_NR_6 = 7, + CPU_NR_7 = 8, + CPU_NR_8 = 9, + CPU_NR_9 = 10, + CPU_NR_10 = 11, + CPU_NR_11 = 12, + CPU_NR_12 = 13, + CPU_NR_13 = 14, + CPU_NR_14 = 15, + CPU_NR_15 = 16, + CPU_NR_16 = 17, + CPU_NR_17 = 18, + CPU_NR_18 = 19, + CPU_NR_19 = 20, + CPU_NR_20 = 21, + CPU_NR_21 = 22, + CPU_NR_22 = 23, + CPU_NR_23 = 24, + CPU_NR_24 = 25, + CPU_NR_25 = 26, + CPU_NR_26 = 27, + CPU_NR_27 = 28, + CPU_NR_28 = 29, + CPU_NR_29 = 30, + CPU_NR_30 = 31, + CPU_NR_31 = 32, +}; + +/* /proc/sys/cpu/{0,1,...,(NR_CPUS-1)} */ +enum { + CPU_NR_FREQ_MAX = 1, + CPU_NR_FREQ_MIN = 2, + CPU_NR_FREQ = 3, +}; + +#define CTL_CPU_VARS_SPEED_MAX(cpunr) { \ + .ctl_name = CPU_NR_FREQ_MAX, \ + .data = &cpu_max_freq[cpunr], \ + .procname = "speed-max", \ + .maxlen = sizeof(cpu_max_freq[cpunr]),\ + .mode = 0444, \ + .proc_handler = proc_dointvec, } + +#define CTL_CPU_VARS_SPEED_MIN(cpunr) { \ + .ctl_name = CPU_NR_FREQ_MIN, \ + .data = &cpu_min_freq[cpunr], \ + .procname = "speed-min", \ + .maxlen = sizeof(cpu_min_freq[cpunr]),\ + .mode = 0444, \ + .proc_handler = proc_dointvec, } + +#define CTL_CPU_VARS_SPEED(cpunr) { \ + .ctl_name = CPU_NR_FREQ, \ + .procname = "speed", \ + .mode = 0644, \ + .proc_handler = cpufreq_procctl, \ + .strategy = cpufreq_sysctl, \ + .extra1 = (void*) (cpunr), } + +#define CTL_TABLE_CPU_VARS(cpunr) static ctl_table ctl_cpu_vars_##cpunr[] = {\ + CTL_CPU_VARS_SPEED_MAX(cpunr), \ + CTL_CPU_VARS_SPEED_MIN(cpunr), \ + CTL_CPU_VARS_SPEED(cpunr), \ + { .ctl_name = 0, }, } + +/* the ctl_table entry for each CPU */ +#define CPU_ENUM(s) { \ + .ctl_name = (CPU_NR + s), \ + .procname = #s, \ + .mode = 0555, \ + .child = ctl_cpu_vars_##s } + +#endif /* CONFIG_CPU_FREQ_24_API */ + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +#define CPUFREQ_ENTRY_INVALID ~0 +#define CPUFREQ_TABLE_END ~1 + +struct cpufreq_frequency_table { + unsigned int index; /* any */ + unsigned int frequency; /* kHz - doesn't need to be in ascending + * order */ +}; + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table); + +int cpufreq_frequency_table_setpolicy(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int *index); + +#endif /* _LINUX_CPUFREQ_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/dcache.h linux.21pre4-ac2/include/linux/dcache.h --- linux.21pre4/include/linux/dcache.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/dcache.h 2003-02-04 15:54:22.000000000 +0000 @@ -62,7 +62,8 @@ return end_name_hash(hash); } -#define DNAME_INLINE_LEN 16 +/* XXX: check good value for 64bit */ +#define DNAME_INLINE_LEN 32 struct dentry { atomic_t d_count; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/device-mapper.h linux.21pre4-ac2/include/linux/device-mapper.h --- linux.21pre4/include/linux/device-mapper.h 1970-01-01 
01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/device-mapper.h 2003-01-06 17:11:40.000000000 +0000 @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DEVICE_MAPPER_H +#define _LINUX_DEVICE_MAPPER_H + +#define DM_DIR "mapper" /* Slashes not supported */ +#define DM_MAX_TYPE_NAME 16 +#define DM_NAME_LEN 128 +#define DM_UUID_LEN 129 + +#ifdef __KERNEL__ + +struct dm_table; +struct dm_dev; +typedef unsigned long offset_t; + +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; + +/* + * Prototypes for functions for a target + */ +typedef int (*dm_ctr_fn) (struct dm_table *t, offset_t b, offset_t l, + int argc, char **argv, void **context); +typedef void (*dm_dtr_fn) (struct dm_table *t, void *c); +typedef int (*dm_map_fn) (struct buffer_head *bh, int rw, void *context); +typedef int (*dm_err_fn) (struct buffer_head *bh, int rw, void *context); +typedef int (*dm_status_fn) (status_type_t status_type, char *result, + int maxlen, void *context); + +void dm_error(const char *message); + +/* + * Constructors should call these functions to ensure destination devices + * are opened/closed correctly + */ +int dm_table_get_device(struct dm_table *t, const char *path, + offset_t start, offset_t len, + int mode, struct dm_dev **result); +void dm_table_put_device(struct dm_table *table, struct dm_dev *d); + +/* + * Information about a target type + */ +struct target_type { + const char *name; + struct module *module; + dm_ctr_fn ctr; + dm_dtr_fn dtr; + dm_map_fn map; + dm_err_fn err; + dm_status_fn status; +}; + +int dm_register_target(struct target_type *t); +int dm_unregister_target(struct target_type *t); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_DEVICE_MAPPER_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/dm-ioctl.h linux.21pre4-ac2/include/linux/dm-ioctl.h --- linux.21pre4/include/linux/dm-ioctl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/dm-ioctl.h 2003-02-01 16:51:26.000000000 +0000 @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2001 Sistina Software (UK) Limited. + * + * This file is released under the LGPL. + */ + +#ifndef _LINUX_DM_IOCTL_H +#define _LINUX_DM_IOCTL_H + +#include +#include + +/* + * Implements a traditional ioctl interface to the device mapper. + */ + +/* + * All ioctl arguments consist of a single chunk of memory, with + * this structure at the start. If a uuid is specified any + * lookup (eg. for a DM_INFO) will be done on that, *not* the + * name. + */ +struct dm_ioctl { + /* + * The version number is made up of three parts: + * major - no backward or forward compatibility, + * minor - only backwards compatible, + * patch - both backwards and forwards compatible. + * + * All clients of the ioctl interface should fill in the + * version number of the interface that they were + * compiled with. + * + * All recognised ioctl commands (ie. those that don't + * return -ENOTTY) fill out this field, even if the + * command failed. 
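As a sketch of the target interface just declared, the following hypothetical pass-through target remaps every buffer onto a single underlying device. The passthru_* names are invented; the open mode, the "return 1 so the core submits the remapped buffer" convention and the dm_dev dereference are assumptions modelled on the in-tree linear target of this era rather than anything guaranteed by this header.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/device-mapper.h>

struct passthru_c {
	struct dm_dev *dev;	/* underlying device, held via dm_table_get_device() */
	offset_t start;		/* sector offset on that device */
};

/* <dev_path> <offset>: parse the arguments and grab the destination device. */
static int passthru_ctr(struct dm_table *t, offset_t b, offset_t l,
			int argc, char **argv, void **context)
{
	struct passthru_c *pc;

	if (argc != 2) {
		dm_error("passthru: <dev_path> <offset> required");
		return -EINVAL;
	}

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	pc->start = simple_strtoul(argv[1], NULL, 10);
	if (dm_table_get_device(t, argv[0], pc->start, l,
				FMODE_READ | FMODE_WRITE, &pc->dev)) {
		kfree(pc);
		return -ENXIO;
	}

	*context = pc;
	return 0;
}

static void passthru_dtr(struct dm_table *t, void *context)
{
	struct passthru_c *pc = context;

	dm_table_put_device(t, pc->dev);
	kfree(pc);
}

/* Redirect the buffer head; a positive return asks the core to submit it. */
static int passthru_map(struct buffer_head *bh, int rw, void *context)
{
	struct passthru_c *pc = context;

	bh->b_rdev = pc->dev->dev;	/* kdev_t member assumed from the driver's internal dm.h */
	bh->b_rsector += pc->start;
	return 1;
}

static struct target_type passthru_target = {
	.name	= "passthru",
	.module	= THIS_MODULE,
	.ctr	= passthru_ctr,
	.dtr	= passthru_dtr,
	.map	= passthru_map,
};

/* dm_register_target(&passthru_target) in module init,
 * dm_unregister_target(&passthru_target) on exit. */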
+ */ + uint32_t version[3]; /* in/out */ + uint32_t data_size; /* total size of data passed in + * including this struct */ + + uint32_t data_start; /* offset to start of data + * relative to start of this struct */ + + uint32_t target_count; /* in/out */ + uint32_t open_count; /* out */ + uint32_t flags; /* in/out */ + + __kernel_dev_t dev; /* in/out */ + + char name[DM_NAME_LEN]; /* device name */ + char uuid[DM_UUID_LEN]; /* unique identifier for + * the block device */ +}; + +/* + * Used to specify tables. These structures appear after the + * dm_ioctl. + */ +struct dm_target_spec { + int32_t status; /* used when reading from kernel only */ + uint64_t sector_start; + uint32_t length; + + /* + * Offset in bytes (from the start of this struct) to + * next target_spec. + */ + uint32_t next; + + char target_type[DM_MAX_TYPE_NAME]; + + /* + * Parameter string starts immediately after this object. + * Be careful to add padding after string to ensure correct + * alignment of subsequent dm_target_spec. + */ +}; + +/* + * Used to retrieve the target dependencies. + */ +struct dm_target_deps { + uint32_t count; + + __kernel_dev_t dev[0]; /* out */ +}; + +/* + * If you change this make sure you make the corresponding change + * to dm-ioctl.c:lookup_ioctl() + */ +enum { + /* Top level cmds */ + DM_VERSION_CMD = 0, + DM_REMOVE_ALL_CMD, + + /* device level cmds */ + DM_DEV_CREATE_CMD, + DM_DEV_REMOVE_CMD, + DM_DEV_RELOAD_CMD, + DM_DEV_RENAME_CMD, + DM_DEV_SUSPEND_CMD, + DM_DEV_DEPS_CMD, + DM_DEV_STATUS_CMD, + + /* target level cmds */ + DM_TARGET_STATUS_CMD, + DM_TARGET_WAIT_CMD +}; + +#define DM_IOCTL 0xfd + +#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) +#define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) + +#define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl) +#define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl) +#define DM_DEV_RELOAD _IOWR(DM_IOCTL, DM_DEV_RELOAD_CMD, struct dm_ioctl) +#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl) +#define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl) +#define DM_DEV_DEPS _IOWR(DM_IOCTL, DM_DEV_DEPS_CMD, struct dm_ioctl) +#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl) + +#define DM_TARGET_STATUS _IOWR(DM_IOCTL, DM_TARGET_STATUS_CMD, struct dm_ioctl) +#define DM_TARGET_WAIT _IOWR(DM_IOCTL, DM_TARGET_WAIT_CMD, struct dm_ioctl) + +#define DM_VERSION_MAJOR 1 +#define DM_VERSION_MINOR 0 +#define DM_VERSION_PATCHLEVEL 3 +#define DM_VERSION_EXTRA "-ioctl-bk (2002-08-5)" + +/* Status bits */ +#define DM_READONLY_FLAG 0x00000001 +#define DM_SUSPEND_FLAG 0x00000002 +#define DM_EXISTS_FLAG 0x00000004 +#define DM_PERSISTENT_DEV_FLAG 0x00000008 + +/* + * Flag passed into ioctl STATUS command to get table information + * rather than current status. 
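From userspace the same header drives the control ioctls; the following sketch shows the version handshake described above. The /dev/mapper/control node name is an assumption about how the control device has been created on the system.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	struct dm_ioctl dmi;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&dmi, 0, sizeof(dmi));
	/* Tell the kernel which interface version this program was built for;
	 * the kernel writes its own version back, even when the command fails. */
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size  = sizeof(dmi);	/* no payload follows the header */

	if (ioctl(fd, DM_VERSION, &dmi) == 0)
		printf("device-mapper ioctl interface %u.%u.%u\n",
		       dmi.version[0], dmi.version[1], dmi.version[2]);

	close(fd);
	return 0;
}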
+ */ +#define DM_STATUS_TABLE_FLAG 0x00000010 + +#endif /* _LINUX_DM_IOCTL_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/dqblk_v1.h linux.21pre4-ac2/include/linux/dqblk_v1.h --- linux.21pre4/include/linux/dqblk_v1.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/dqblk_v1.h 2003-01-06 23:24:26.000000000 +0000 @@ -0,0 +1,18 @@ +/* + * File with in-memory structures of old quota format + */ + +#ifndef _LINUX_DQBLK_V1_H +#define _LINUX_DQBLK_V1_H + +/* Id of quota format */ +#define QFMT_VFS_OLD 1 + +/* Root squash turned on */ +#define V1_DQF_RSQUASH 1 + +/* Special information about quotafile */ +struct v1_mem_dqinfo { +}; + +#endif /* _LINUX_DQBLK_V1_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/dqblk_v2.h linux.21pre4-ac2/include/linux/dqblk_v2.h --- linux.21pre4/include/linux/dqblk_v2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/dqblk_v2.h 2003-02-01 16:51:25.000000000 +0000 @@ -0,0 +1,20 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_DQBLK_V2_H +#define _LINUX_DQBLK_V2_H + +#include + +/* id numbers of quota format */ +#define QFMT_VFS_V0 2 + +/* Inmemory copy of version specific information */ +struct v2_mem_dqinfo { + unsigned int dqi_blocks; + unsigned int dqi_free_blk; + unsigned int dqi_free_entry; +}; + +#endif /* _LINUX_DQBLK_V2_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/dqblk_xfs.h linux.21pre4-ac2/include/linux/dqblk_xfs.h --- linux.21pre4/include/linux/dqblk_xfs.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/dqblk_xfs.h 2003-02-01 16:51:25.000000000 +0000 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 1995-2001 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, + * USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ + */ +#ifndef _LINUX_DQBLK_XFS_H +#define _LINUX_DQBLK_XFS_H + +#include + +/* + * Disk quota - quotactl(2) commands for the XFS Quota Manager (XQM). 
+ */ + +#define XQM_CMD(x) (('X'<<8)+(x)) /* note: forms first QCMD argument */ +#define Q_XQUOTAON XQM_CMD(0x1) /* enable accounting/enforcement */ +#define Q_XQUOTAOFF XQM_CMD(0x2) /* disable accounting/enforcement */ +#define Q_XGETQUOTA XQM_CMD(0x3) /* get disk limits and usage */ +#define Q_XSETQLIM XQM_CMD(0x4) /* set disk limits */ +#define Q_XGETQSTAT XQM_CMD(0x5) /* get quota subsystem status */ +#define Q_XQUOTARM XQM_CMD(0x6) /* free disk space used by dquots */ + +/* + * fs_disk_quota structure: + * + * This contains the current quota information regarding a user/proj/group. + * It is 64-bit aligned, and all the blk units are in BBs (Basic Blocks) of + * 512 bytes. + */ +#define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ +typedef struct fs_disk_quota { + __s8 d_version; /* version of this structure */ + __s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */ + __u16 d_fieldmask; /* field specifier */ + __u32 d_id; /* user, project, or group ID */ + __u64 d_blk_hardlimit;/* absolute limit on disk blks */ + __u64 d_blk_softlimit;/* preferred limit on disk blks */ + __u64 d_ino_hardlimit;/* maximum # allocated inodes */ + __u64 d_ino_softlimit;/* preferred inode limit */ + __u64 d_bcount; /* # disk blocks owned by the user */ + __u64 d_icount; /* # inodes owned by the user */ + __s32 d_itimer; /* zero if within inode limits */ + /* if not, we refuse service */ + __s32 d_btimer; /* similar to above; for disk blocks */ + __u16 d_iwarns; /* # warnings issued wrt num inodes */ + __u16 d_bwarns; /* # warnings issued wrt disk blocks */ + __s32 d_padding2; /* padding2 - for future use */ + __u64 d_rtb_hardlimit;/* absolute limit on realtime blks */ + __u64 d_rtb_softlimit;/* preferred limit on RT disk blks */ + __u64 d_rtbcount; /* # realtime blocks owned */ + __s32 d_rtbtimer; /* similar to above; for RT disk blks */ + __u16 d_rtbwarns; /* # warnings issued wrt RT disk blks */ + __s16 d_padding3; /* padding3 - for future use */ + char d_padding4[8]; /* yet more padding */ +} fs_disk_quota_t; + +/* + * These fields are sent to Q_XSETQLIM to specify fields that need to change. + */ +#define FS_DQ_ISOFT (1<<0) +#define FS_DQ_IHARD (1<<1) +#define FS_DQ_BSOFT (1<<2) +#define FS_DQ_BHARD (1<<3) +#define FS_DQ_RTBSOFT (1<<4) +#define FS_DQ_RTBHARD (1<<5) +#define FS_DQ_LIMIT_MASK (FS_DQ_ISOFT | FS_DQ_IHARD | FS_DQ_BSOFT | \ + FS_DQ_BHARD | FS_DQ_RTBSOFT | FS_DQ_RTBHARD) +/* + * These timers can only be set in super user's dquot. For others, timers are + * automatically started and stopped. Superusers timer values set the limits + * for the rest. In case these values are zero, the DQ_{F,B}TIMELIMIT values + * defined below are used. + * These values also apply only to the d_fieldmask field for Q_XSETQLIM. + */ +#define FS_DQ_BTIMER (1<<6) +#define FS_DQ_ITIMER (1<<7) +#define FS_DQ_RTBTIMER (1<<8) +#define FS_DQ_TIMER_MASK (FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER) + +/* + * The following constants define the default amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). These may be modified by the quotactl(2) + * system call with the Q_XSETQLIM command. + */ +#define DQ_FTIMELIMIT (7 * 24*60*60) /* 1 week */ +#define DQ_BTIMELIMIT (7 * 24*60*60) /* 1 week */ + +/* + * Various flags related to quotactl(2). Only relevant to XFS filesystems. 
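For orientation, this is roughly how userspace reaches the XQM definitions through quotactl(2); the device path is only an example, the id argument selects whose quota is returned, and all block counts are in the 512-byte basic blocks noted above.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQUOTA, struct fs_disk_quota */

int main(int argc, char **argv)
{
	struct fs_disk_quota dq;
	const char *dev = argc > 1 ? argv[1] : "/dev/sda1";	/* XFS block device */

	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, getuid(),
		     (caddr_t)&dq) != 0) {
		perror("quotactl");
		return 1;
	}

	printf("uid %u: %llu BBs used, soft limit %llu, hard limit %llu\n",
	       (unsigned int)getuid(),
	       (unsigned long long)dq.d_bcount,
	       (unsigned long long)dq.d_blk_softlimit,
	       (unsigned long long)dq.d_blk_hardlimit);
	return 0;
}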
+ */ +#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ +#define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ +#define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ +#define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ + +#define XFS_USER_QUOTA (1<<0) /* user quota type */ +#define XFS_PROJ_QUOTA (1<<1) /* (IRIX) project quota type */ +#define XFS_GROUP_QUOTA (1<<2) /* group quota type */ + +/* + * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. + * Provides a centralized way to get meta infomation about the quota subsystem. + * eg. space taken up for user and group quotas, number of dquots currently + * incore. + */ +#define FS_QSTAT_VERSION 1 /* fs_quota_stat.qs_version */ + +/* + * Some basic infomation about 'quota files'. + */ +typedef struct fs_qfilestat { + __u64 qfs_ino; /* inode number */ + __u64 qfs_nblks; /* number of BBs 512-byte-blks */ + __u32 qfs_nextents; /* number of extents */ +} fs_qfilestat_t; + +typedef struct fs_quota_stat { + __s8 qs_version; /* version number for future changes */ + __u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ + __s8 qs_pad; /* unused */ + fs_qfilestat_t qs_uquota; /* user quota storage information */ + fs_qfilestat_t qs_gquota; /* group quota storage information */ + __u32 qs_incoredqs; /* number of dquots incore */ + __s32 qs_btimelimit; /* limit for blks timer */ + __s32 qs_itimelimit; /* limit for inodes timer */ + __s32 qs_rtbtimelimit;/* limit for rt blks timer */ + __u16 qs_bwarnlimit; /* limit for num warnings */ + __u16 qs_iwarnlimit; /* limit for num warnings */ +} fs_quota_stat_t; + +#endif /* _LINUX_DQBLK_XFS_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/ext3_jbd.h linux.21pre4-ac2/include/linux/ext3_jbd.h --- linux.21pre4/include/linux/ext3_jbd.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/linux/ext3_jbd.h 2003-02-04 15:54:23.000000000 +0000 @@ -28,7 +28,7 @@ * indirection blocks, the group and superblock summaries, and the data * block to complete the transaction. */ -#define EXT3_SINGLEDATA_TRANS_BLOCKS 8 +#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U /* Define the minimum size for a transaction which modifies data. This * needs to take into account the fact that we may end up modifying two @@ -52,7 +52,7 @@ * start off at the maximum transaction size and grow the transaction * optimistically as we go. */ -#define EXT3_MAX_TRANS_DATA 64 +#define EXT3_MAX_TRANS_DATA 64U /* We break up a large truncate or write transaction once the handle's * buffer credits gets this low, we need either to extend the @@ -61,7 +61,7 @@ * one block, plus two quota updates. Quota allocations are not * needed. 
*/ -#define EXT3_RESERVE_TRANS_BLOCKS 12 +#define EXT3_RESERVE_TRANS_BLOCKS 12U int ext3_mark_iloc_dirty(handle_t *handle, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/fs.h linux.21pre4-ac2/include/linux/fs.h --- linux.21pre4/include/linux/fs.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/fs.h 2003-02-04 15:54:22.000000000 +0000 @@ -263,7 +263,7 @@ struct page *b_page; /* the page this bh is mapped to */ void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */ void *b_private; /* reserved for b_end_io */ - + void *b_journal_head; /* ext3 journal_heads */ unsigned long b_rsector; /* Real buffer location on disk */ wait_queue_head_t b_wait; @@ -395,7 +395,7 @@ int (*flushpage) (struct page *, unsigned long); int (*releasepage) (struct page *, int); #define KERNEL_HAS_O_DIRECT /* this is for modules out of the kernel */ - int (*direct_IO)(int, struct inode *, struct kiobuf *, unsigned long, int); + int (*direct_IO)(int, struct file *, struct kiobuf *, unsigned long, int); void (*removepage)(struct page *); /* called when page gets removed from the inode */ }; @@ -455,6 +455,7 @@ unsigned long i_blksize; unsigned long i_blocks; unsigned long i_version; + unsigned short i_bytes; struct semaphore i_sem; struct semaphore i_zombie; struct inode_operations *i_op; @@ -515,6 +516,39 @@ } u; }; +static inline void inode_add_bytes(struct inode *inode, loff_t bytes) +{ + inode->i_blocks += bytes >> 9; + bytes &= 511; + inode->i_bytes += bytes; + if (inode->i_bytes >= 512) { + inode->i_blocks++; + inode->i_bytes -= 512; + } +} + +static inline void inode_sub_bytes(struct inode *inode, loff_t bytes) +{ + inode->i_blocks -= bytes >> 9; + bytes &= 511; + if (inode->i_bytes < bytes) { + inode->i_blocks--; + inode->i_bytes += 512; + } + inode->i_bytes -= bytes; +} + +static inline loff_t inode_get_bytes(struct inode *inode) +{ + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; +} + +static inline void inode_set_bytes(struct inode *inode, loff_t bytes) +{ + inode->i_blocks = bytes >> 9; + inode->i_bytes = bytes & 511; +} + struct fown_struct { int pid; /* pid or -pgrp where SIGIO should be sent */ uid_t uid, euid; /* uid/euid of process setting the owner */ @@ -660,20 +694,6 @@ int last_type; }; -#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ -#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ - -struct quota_mount_options -{ - unsigned int flags; /* Flags for diskquotas on this device */ - struct semaphore dqio_sem; /* lock device while I/O in progress */ - struct semaphore dqoff_sem; /* serialize quota_off() and quota_on() on device */ - struct file *files[MAXQUOTAS]; /* fp's to quotafiles */ - time_t inode_expire[MAXQUOTAS]; /* expiretime for inode-quota */ - time_t block_expire[MAXQUOTAS]; /* expiretime for block-quota */ - char rsquash[MAXQUOTAS]; /* for quotas threat root as any other user */ -}; - /* * Umount options */ @@ -721,6 +741,7 @@ struct file_system_type *s_type; struct super_operations *s_op; struct dquot_operations *dq_op; + struct quotactl_ops *s_qcop; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; @@ -735,7 +756,7 @@ struct block_device *s_bdev; struct list_head s_instances; - struct quota_mount_options s_dquot; /* Diskquota specific options */ + struct quota_info s_dquot; /* Diskquota specific options */ union { struct minix_sb_info minix_sb; @@ -959,16 +980,6 @@ __mark_inode_dirty(inode, I_DIRTY_PAGES); } -struct dquot_operations { - void 
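The new i_bytes field keeps the sub-512-byte remainder so that byte-granular accounting can charge exact sizes while i_blocks stays in 512-byte units; a short worked example of the carry handling in the helpers above:

	/* starting from i_blocks == 0, i_bytes == 0 */
	inode_add_bytes(inode, 1000);	/* i_blocks = 1, i_bytes = 488 */
	inode_add_bytes(inode, 1000);	/* 488 + 488 = 976 >= 512, so the
					 * remainder carries over:
					 * i_blocks = 3, i_bytes = 464 */
	/* inode_get_bytes(inode) == 3 * 512 + 464 == 2000, the exact total added */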
(*initialize) (struct inode *, short); - void (*drop) (struct inode *); - int (*alloc_block) (struct inode *, unsigned long, char); - int (*alloc_inode) (const struct inode *, unsigned long); - void (*free_block) (struct inode *, unsigned long); - void (*free_inode) (const struct inode *, unsigned long); - int (*transfer) (struct inode *, struct iattr *); -}; - struct file_system_type { const char *name; int fs_flags; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/fs_struct.h linux.21pre4-ac2/include/linux/fs_struct.h --- linux.21pre4/include/linux/fs_struct.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/fs_struct.h 2003-02-04 15:54:22.000000000 +0000 @@ -2,6 +2,9 @@ #define _LINUX_FS_STRUCT_H #ifdef __KERNEL__ +#include /* for RW_LOCK_* */ +#include /* for atomic_t */ + struct fs_struct { atomic_t count; rwlock_t lock; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/hdlc/ioctl.h linux.21pre4-ac2/include/linux/hdlc/ioctl.h --- linux.21pre4/include/linux/hdlc/ioctl.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/hdlc/ioctl.h 2003-01-28 16:39:15.000000000 +0000 @@ -34,22 +34,15 @@ } fr_proto_pvc; /* for creating/deleting FR PVCs */ typedef struct { + unsigned int dlci; + char master[IFNAMSIZ]; /* Name of master FRAD device */ +}fr_proto_pvc_info; /* for returning PVC information only */ + +typedef struct { unsigned int interval; unsigned int timeout; } cisco_proto; /* PPP doesn't need any info now - supply length = 0 to ioctl */ -union hdlc_settings { - raw_hdlc_proto raw_hdlc; - cisco_proto cisco; - fr_proto fr; - fr_proto_pvc fr_pvc; -}; - -union line_settings { - sync_serial_settings sync; - te1_settings te1; -}; - #endif /* __HDLC_IOCTL_H__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/hdlc.h linux.21pre4-ac2/include/linux/hdlc.h --- linux.21pre4/include/linux/hdlc.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/hdlc.h 2003-02-04 15:54:23.000000000 +0000 @@ -1,12 +1,11 @@ /* * Generic HDLC support routines for Linux * - * Copyright (C) 1999-2002 Krzysztof Halasa + * Copyright (C) 1999-2003 Krzysztof Halasa * * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
*/ #ifndef __HDLC_H @@ -52,7 +51,7 @@ #include #define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */ -#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10) /* max 10 bytes for FR */ +#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */ #define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */ @@ -145,17 +144,20 @@ typedef struct pvc_device_struct { - struct net_device netdev; /* PVC net device - must be first */ - struct net_device_stats stats; struct hdlc_device_struct *master; - struct pvc_device_struct *next; + struct net_device *main; + struct net_device *ether; /* bridged Ethernet interface */ + struct pvc_device_struct *next; /* Sorted in ascending DLCI order */ + int dlci; + int open_count; struct { - int active; - int new; - int deleted; - int fecn; - int becn; + unsigned int new: 1; + unsigned int active: 1; + unsigned int exist: 1; + unsigned int deleted: 1; + unsigned int fecn: 1; + unsigned int becn: 1; }state; }pvc_device; @@ -180,18 +182,20 @@ void (*stop)(struct hdlc_device_struct *hdlc); void (*proto_detach)(struct hdlc_device_struct *hdlc); void (*netif_rx)(struct sk_buff *skb); + unsigned short (*type_trans)(struct sk_buff *skb, + struct net_device *dev); int proto; /* IF_PROTO_HDLC/CISCO/FR/etc. */ union { struct { fr_proto settings; pvc_device *first_pvc; - int pvc_count; + int dce_pvc_count; struct timer_list timer; int last_poll; int reliable; - int changed; + int dce_changed; int request; int fullrep_sent; u32 last_errors; /* last errors bit list */ @@ -226,6 +230,7 @@ int hdlc_raw_ioctl(hdlc_device *hdlc, struct ifreq *ifr); +int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_cisco_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_ppp_ioctl(hdlc_device *hdlc, struct ifreq *ifr); int hdlc_fr_ioctl(hdlc_device *hdlc, struct ifreq *ifr); @@ -254,15 +259,9 @@ } -static __inline__ struct net_device* pvc_to_dev(pvc_device *pvc) -{ - return &pvc->netdev; -} - - static __inline__ pvc_device* dev_to_pvc(struct net_device *dev) { - return (pvc_device*)dev; + return (pvc_device*)dev->priv; } @@ -272,19 +271,6 @@ } -static __inline__ const char *pvc_to_name(pvc_device *pvc) -{ - return pvc_to_dev(pvc)->name; -} - - -static __inline__ u16 netdev_dlci(struct net_device *dev) -{ - return ntohs(*(u16*)dev->dev_addr); -} - - - static __inline__ u16 q922_to_dlci(u8 *hdr) { return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); @@ -345,5 +331,15 @@ } +static __inline__ unsigned short hdlc_type_trans(struct sk_buff *skb, + struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(skb->dev); + if (hdlc->type_trans) + return hdlc->type_trans(skb, dev); + else + return __constant_htons(ETH_P_HDLC); +} + #endif /* __KERNEL */ #endif /* __HDLC_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/ide.h linux.21pre4-ac2/include/linux/ide.h --- linux.21pre4/include/linux/ide.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/ide.h 2003-02-04 15:54:22.000000000 +0000 @@ -93,6 +93,13 @@ #endif /* + * Used to indicate "no IRQ", should be a value that cannot be an IRQ + * number. + */ + +#define IDE_NO_IRQ (-1) + +/* * IDE_DRIVE_CMD is used to implement many features of the hdparm utility */ #define IDE_DRIVE_CMD 99 /* (magic) undef to reduce kernel size*/ @@ -301,21 +308,6 @@ ide_pmac, ide_etrax100, ide_acorn } hwif_chipset_t; -typedef struct ide_io_ops_s { - /* insert io operations here! 
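A sketch of how a port driver's receive path might use the new hook; mycard_rx is hypothetical, and the assumption here is that hdlc->netif_rx is the input routine installed by whichever protocol (Frame Relay, Cisco HDLC, the new Ethernet emulation) is currently attached.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/hdlc.h>

/* Deliver one received frame to the attached WAN protocol. */
static void mycard_rx(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);

	skb->dev = dev;
	skb->mac.raw = skb->data;
	/* The protocol picks the packet type, e.g. ETH_P_HDLC or, for
	 * Ethernet-bridged PVCs, the type taken from the Ethernet header. */
	skb->protocol = hdlc_type_trans(skb, dev);
	hdlc->netif_rx(skb);
}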
*/ - void (*OUTB)(u8 addr, unsigned long port); - void (*OUTW)(u16 addr, unsigned long port); - void (*OUTL)(u32 addr, unsigned long port); - void (*OUTSW)(unsigned long port, void *addr, u32 count); - void (*OUTSL)(unsigned long port, void *addr, u32 count); - - u8 (*INB)(unsigned long port); - u16 (*INW)(unsigned long port); - u32 (*INL)(unsigned long port); - void (*INSW)(unsigned long port, void *addr, u32 count); - void (*INSL)(unsigned long port, void *addr, u32 count); -} ide_io_ops_t; - /* * Structure to hold all information about the location of this port */ @@ -326,9 +318,6 @@ ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ void *priv; /* interface specific data */ hwif_chipset_t chipset; -#if 0 - ide_io_ops_t *iops; /* */ -#endif sata_ioreg_t sata_scr[SATA_NR_PORTS]; sata_ioreg_t sata_misc[SATA_NR_PORTS]; } hw_regs_t; @@ -765,6 +754,7 @@ u8 quirk_list; /* considered quirky, set for a specific host */ u8 suspend_reset; /* drive suspend mode flag, soft-reset recovers */ u8 init_speed; /* transfer rate set at boot */ + u8 pio_speed; /* unused by core, used by some drivers for fallback from DMA */ u8 current_speed; /* current transfer rate set */ u8 dn; /* now wide spread use */ u8 wcache; /* status of write cache */ @@ -838,12 +828,12 @@ #define ide_rq_offset(rq) \ (((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9) -extern inline void *ide_map_buffer(struct request *rq, unsigned long *flags) +static inline void *ide_map_buffer(struct request *rq, unsigned long *flags) { return bh_kmap_irq(rq->bh, flags) + ide_rq_offset(rq); } -extern inline void ide_unmap_buffer(char *buffer, unsigned long *flags) +static inline void ide_unmap_buffer(char *buffer, unsigned long *flags) { bh_kunmap_irq(buffer, flags); } @@ -951,10 +941,8 @@ int (*ide_dma_timeout)(ide_drive_t *drive); #endif -#if 0 - ide_io_ops_t *iops; -#else void (*OUTB)(u8 addr, unsigned long port); + void (*OUTBSYNC)(u8 addr, unsigned long port); void (*OUTW)(u16 addr, unsigned long port); void (*OUTL)(u32 addr, unsigned long port); void (*OUTSW)(unsigned long port, void *addr, u32 count); @@ -965,7 +953,6 @@ u32 (*INL)(unsigned long port); void (*INSW)(unsigned long port, void *addr, u32 count); void (*INSL)(unsigned long port, void *addr, u32 count); -#endif /* dma physical region descriptor table (cpu view) */ unsigned int *dmatable_cpu; @@ -1094,12 +1081,13 @@ struct ide_settings_s *next; } ide_settings_t; -void ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set); -void ide_remove_setting(ide_drive_t *drive, char *name); -ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name); -int ide_read_setting(ide_drive_t *t, ide_settings_t *setting); -int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val); -void ide_add_generic_settings(ide_drive_t *drive); +extern struct semaphore ide_setting_sem; +extern int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set); +extern void ide_remove_setting(ide_drive_t *drive, char *name); +extern ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name); +extern int ide_read_setting(ide_drive_t *t, ide_settings_t *setting); +extern int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val); +extern void ide_add_generic_settings(ide_drive_t 
*drive); /* * /proc/ide interface @@ -1278,12 +1266,19 @@ extern int ide_end_request(ide_drive_t *, int /* uptodate */); /* - * This is used on exit from the driver, to designate the next irq handler + * This is used on exit from the driver to designate the next irq handler * and also to start the safety timer. */ extern void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int, ide_expiry_t *); /* + * This is used on exit from the driver to designate the next irq handler + * and start the safety time safely and atomically from the IRQ handler + * with respect to the command issue (which it also does) + */ +extern void ide_execute_command(ide_drive_t *, task_ioreg_t cmd, ide_handler_t *, unsigned int, ide_expiry_t *); + +/* * Error reporting, in human readable form (luxurious, but a memory hog). * * (drive, msg, status) diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/if.h linux.21pre4-ac2/include/linux/if.h --- linux.21pre4/include/linux/if.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/if.h 2003-02-01 16:51:26.000000000 +0000 @@ -21,6 +21,8 @@ #include /* for "__kernel_caddr_t" et al */ #include /* for "struct sockaddr" et al */ + +#define IFNAMSIZ 16 #include /* Standard interface flags (netdevice->flags). */ @@ -69,7 +71,11 @@ #define IF_PROTO_FR_ADD_PVC 0x2004 /* Create FR PVC */ #define IF_PROTO_FR_DEL_PVC 0x2005 /* Delete FR PVC */ #define IF_PROTO_X25 0x2006 /* X.25 */ - +#define IF_PROTO_HDLC_ETH 0x2007 /* raw HDLC, Ethernet emulation */ +#define IF_PROTO_FR_ADD_ETH_PVC 0x2008 /* Create FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_DEL_ETH_PVC 0x2009 /* Delete FR Ethernet-bridged PVC */ +#define IF_PROTO_FR_PVC 0x200A /* for reading PVC status */ +#define IF_PROTO_FR_ETH_PVC 0x200B /* @@ -103,6 +109,7 @@ cisco_proto *cisco; fr_proto *fr; fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; /* interface settings */ sync_serial_settings *sync; @@ -120,7 +127,6 @@ struct ifreq { #define IFHWADDRLEN 6 -#define IFNAMSIZ 16 union { char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/interrupt.h linux.21pre4-ac2/include/linux/interrupt.h --- linux.21pre4/include/linux/interrupt.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/interrupt.h 2003-02-04 15:54:22.000000000 +0000 @@ -40,7 +40,8 @@ CM206_BH, JS_BH, MACSERIAL_BH, - ISICOM_BH + ISICOM_BH, + SIOLX_BH }; #include diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/jbd.h linux.21pre4-ac2/include/linux/jbd.h --- linux.21pre4/include/linux/jbd.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/jbd.h 2003-02-04 15:54:23.000000000 +0000 @@ -285,7 +285,7 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) { - return bh->b_private; + return bh->b_journal_head; } #define HAVE_JOURNAL_CALLBACK_STATUS diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/kbd_kern.h linux.21pre4-ac2/include/linux/kbd_kern.h --- linux.21pre4/include/linux/kbd_kern.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/kbd_kern.h 2003-02-04 15:54:22.000000000 +0000 @@ -72,6 +72,7 @@ extern int do_poke_blanked_console; extern void (*kbd_ledfunc)(unsigned int led); +extern void kbd_refresh_leds(void); extern void set_console(int nr); extern void schedule_console_callback(void); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/lockd/lockd.h linux.21pre4-ac2/include/linux/lockd/lockd.h --- linux.21pre4/include/linux/lockd/lockd.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/lockd/lockd.h 2003-02-04 15:54:23.000000000 +0000 @@ -89,8 +89,11 @@ /* * This is a server block (i.e. a lock requested by some client which * couldn't be granted because of a conflicting lock). + * + * XXX: Beware of signedness errors. b_when is passed as a signed long + * into time_before_eq et al. --okir */ -#define NLM_NEVER (~(unsigned long) 0) +#define NLM_NEVER (0x7FFFFFF) struct nlm_block { struct nlm_block * b_next; /* linked list (all blocks) */ struct nlm_block * b_fnext; /* linked list (per file) */ @@ -161,6 +164,7 @@ u32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *, struct nlm_lock *); u32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); +void nlmsvc_grant_reply(struct nlm_cookie *, u32); unsigned long nlmsvc_retry_blocked(void); int nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, int action); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/malloc.h linux.21pre4-ac2/include/linux/malloc.h --- linux.21pre4/include/linux/malloc.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/malloc.h 2003-02-04 15:54:22.000000000 +0000 @@ -1,7 +1,7 @@ #ifndef _LINUX_MALLOC_H #define _LINUX_MALLOC_H -#warning linux/malloc.h is deprecated, use linux/slab.h instead. +#error linux/malloc.h is deprecated, use linux/slab.h instead. 
#include #endif /* _LINUX_MALLOC_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/mempool.h linux.21pre4-ac2/include/linux/mempool.h --- linux.21pre4/include/linux/mempool.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/mempool.h 2003-02-04 15:54:23.000000000 +0000 @@ -0,0 +1,31 @@ +/* + * memory buffer pool support + */ +#ifndef _LINUX_MEMPOOL_H +#define _LINUX_MEMPOOL_H + +#include +#include + +struct mempool_s; +typedef struct mempool_s mempool_t; + +typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data); +typedef void (mempool_free_t)(void *element, void *pool_data); + +extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask); +extern void mempool_destroy(mempool_t *pool); +extern void * mempool_alloc(mempool_t *pool, int gfp_mask); +extern void mempool_free(void *element, mempool_t *pool); + +/* + * A mempool_alloc_t and mempool_free_t that get the memory from + * a slab that is passed in through pool_data. + */ +void *mempool_alloc_slab(int gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); + + +#endif /* _LINUX_MEMPOOL_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/mman.h linux.21pre4-ac2/include/linux/mman.h --- linux.21pre4/include/linux/mman.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/mman.h 2003-01-06 15:38:09.000000000 +0000 @@ -6,4 +6,8 @@ #define MREMAP_MAYMOVE 1 #define MREMAP_FIXED 2 +extern int vm_enough_memory(long pages); +extern void vm_unacct_memory(long pages); +extern void vm_validate_enough(char *x); + #endif /* _LINUX_MMAN_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/mm.h linux.21pre4-ac2/include/linux/mm.h --- linux.21pre4/include/linux/mm.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/mm.h 2003-02-04 15:54:22.000000000 +0000 @@ -104,7 +104,13 @@ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */ -#define VM_STACK_FLAGS 0x00000177 +#define VM_ACCOUNT 0x00100000 /* Memory is a vm accounted object */ + +#ifdef ARCH_STACK_GROWSUP +#define VM_STACK_FLAGS (VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT) +#else +#define VM_STACK_FLAGS (VM_DATA_DEFAULT_FLAGS|VM_GROWSDOWN|VM_ACCOUNT) +#endif #define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) #define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK @@ -557,7 +563,7 @@ return ret; } -extern int do_munmap(struct mm_struct *, unsigned long, size_t); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, int acct); extern unsigned long do_brk(unsigned long, unsigned long); @@ -624,34 +630,9 @@ return gfp_mask; } - -/* vma is the first one with address < vma->vm_end, - * and even address < vma->vm_start. Have to extend vma. */ -static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) -{ - unsigned long grow; - /* - * vma->vm_start/vm_end cannot change under us because the caller is required - * to hold the mmap_sem in write mode. We need to get the spinlock only - * before relocating the vma range ourself. 
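The mempool interface added above exists so that critical allocation sites (typically the block I/O path) can draw on a pre-allocated reserve instead of failing under memory pressure; a minimal sketch of pairing it with a slab cache, where struct my_io and the pool size of 16 are invented for illustration:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct my_io {				/* per-request bookkeeping, illustrative only */
	void *buffer;
	int rw;
};

static kmem_cache_t *my_io_cachep;
static mempool_t *my_io_pool;

static int __init my_pool_init(void)
{
	my_io_cachep = kmem_cache_create("my_io", sizeof(struct my_io),
					 0, 0, NULL, NULL);
	if (!my_io_cachep)
		return -ENOMEM;

	/* Keep a reserve of 16 objects so the writeout path cannot deadlock
	 * waiting for memory that only completed writeout could free. */
	my_io_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
				    my_io_cachep);
	if (!my_io_pool) {
		kmem_cache_destroy(my_io_cachep);
		return -ENOMEM;
	}
	return 0;
}

static struct my_io *my_io_get(void)
{
	/* With a blocking mask this waits for a reserved object to come back
	 * rather than returning NULL. */
	return mempool_alloc(my_io_pool, GFP_NOIO);
}

static void my_io_put(struct my_io *io)
{
	mempool_free(io, my_io_pool);
}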
- */ - address &= PAGE_MASK; - spin_lock(&vma->vm_mm->page_table_lock); - grow = (vma->vm_start - address) >> PAGE_SHIFT; - if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || - ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { - spin_unlock(&vma->vm_mm->page_table_lock); - return -ENOMEM; - } - vma->vm_start = address; - vma->vm_pgoff -= grow; - vma->vm_mm->total_vm += grow; - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm += grow; - spin_unlock(&vma->vm_mm->page_table_lock); - return 0; -} +/* Do stack extension */ +extern int expand_stack(struct vm_area_struct * vma, unsigned long address); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/nfs_fs.h linux.21pre4-ac2/include/linux/nfs_fs.h --- linux.21pre4/include/linux/nfs_fs.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/nfs_fs.h 2003-02-04 15:54:23.000000000 +0000 @@ -274,6 +274,11 @@ #define NFS_TestClearPageSync(page) test_and_clear_bit(PG_fs_1, &(page)->flags) /* + * linux/fs/nfs/direct.c + */ +extern int nfs_direct_IO(int, struct file *, struct kiobuf *, unsigned long, int); + +/* * linux/fs/mount_clnt.c * (Used only by nfsroot module) */ @@ -302,6 +307,23 @@ return __nfs_refresh_inode(inode,fattr); } +/* + * This function will be used to simulate weak cache consistency + * under NFSv2 when the NFSv3 attribute patch is included. + * For the moment, we just call nfs_refresh_inode(). + */ +static __inline__ int +nfs_write_attributes(struct inode *inode, struct nfs_fattr *fattr) +{ + if ((fattr->valid & NFS_ATTR_FATTR) && !(fattr->valid & NFS_ATTR_WCC)) { + fattr->pre_size = NFS_CACHE_ISIZE(inode); + fattr->pre_mtime = NFS_CACHE_MTIME(inode); + fattr->pre_ctime = NFS_CACHE_CTIME(inode); + fattr->valid |= NFS_ATTR_WCC; + } + return nfs_refresh_inode(inode, fattr); +} + static inline loff_t nfs_size_to_loff_t(__u64 size) { diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/nfs_xdr.h linux.21pre4-ac2/include/linux/nfs_xdr.h --- linux.21pre4/include/linux/nfs_xdr.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/linux/nfs_xdr.h 2003-01-06 19:20:53.000000000 +0000 @@ -59,7 +59,7 @@ /* Arguments to the read call. * Note that NFS_READ_MAXIOV must be <= (MAX_IOVEC-2) from sunrpc/xprt.h */ -#define NFS_READ_MAXIOV 8 +#define NFS_READ_MAXIOV (9) struct nfs_readargs { struct nfs_fh * fh; @@ -78,7 +78,7 @@ /* Arguments to the write call. 
* Note that NFS_WRITE_MAXIOV must be <= (MAX_IOVEC-2) from sunrpc/xprt.h */ -#define NFS_WRITE_MAXIOV 8 +#define NFS_WRITE_MAXIOV (9) struct nfs_writeargs { struct nfs_fh * fh; __u64 offset; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/pci_ids.h linux.21pre4-ac2/include/linux/pci_ids.h --- linux.21pre4/include/linux/pci_ids.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/pci_ids.h 2003-01-11 23:20:03.000000000 +0000 @@ -1117,6 +1117,7 @@ #define PCI_DEVICE_ID_SYSKONNECT_FP 0x4000 #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 #define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 +#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320 #define PCI_VENDOR_ID_VMIC 0x114a #define PCI_DEVICE_ID_VMIC_VME 0x7587 @@ -1770,6 +1771,7 @@ #define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 #define PCI_DEVICE_ID_INTEL_82451NX 0x84ca +#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb #define PCI_VENDOR_ID_COMPUTONE 0x8e0e #define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/pc_keyb.h linux.21pre4-ac2/include/linux/pc_keyb.h --- linux.21pre4/include/linux/pc_keyb.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/pc_keyb.h 2003-01-06 15:42:23.000000000 +0000 @@ -128,3 +128,6 @@ struct fasync_struct *fasync; unsigned char buf[AUX_BUF_SIZE]; }; + +extern void pckbd_blink (char); + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/pnpbios.h linux.21pre4-ac2/include/linux/pnpbios.h --- linux.21pre4/include/linux/pnpbios.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/pnpbios.h 2003-02-04 15:54:23.000000000 +0000 @@ -0,0 +1,212 @@ +/* + * Include file for the interface to a PnP BIOS + * + * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de) + * PnP handler parts (c) 1998 Tom Lees + * Minor reorganizations by David Hinds + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PNPBIOS_H +#define _LINUX_PNPBIOS_H + +#ifdef __KERNEL__ + +#include +#include + +/* + * Status codes (warnings and errors) + */ +#define PNP_SUCCESS 0x00 +#define PNP_NOT_SET_STATICALLY 0x7f +#define PNP_UNKNOWN_FUNCTION 0x81 +#define PNP_FUNCTION_NOT_SUPPORTED 0x82 +#define PNP_INVALID_HANDLE 0x83 +#define PNP_BAD_PARAMETER 0x84 +#define PNP_SET_FAILED 0x85 +#define PNP_EVENTS_NOT_PENDING 0x86 +#define PNP_SYSTEM_NOT_DOCKED 0x87 +#define PNP_NO_ISA_PNP_CARDS 0x88 +#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89 +#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a +#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b +#define PNP_BUFFER_TOO_SMALL 0x8c +#define PNP_USE_ESCD_SUPPORT 0x8d +#define PNP_MESSAGE_NOT_SUPPORTED 0x8e +#define PNP_HARDWARE_ERROR 0x8f + +#define ESCD_SUCCESS 0x00 +#define ESCD_IO_ERROR_READING 0x55 +#define ESCD_INVALID 0x56 +#define ESCD_BUFFER_TOO_SMALL 0x59 +#define ESCD_NVRAM_TOO_SMALL 0x5a +#define ESCD_FUNCTION_NOT_SUPPORTED 0x81 + +/* + * Events that can be received by "get event" + */ +#define PNPEV_ABOUT_TO_CHANGE_CONFIG 0x0001 +#define PNPEV_DOCK_CHANGED 0x0002 +#define PNPEV_SYSTEM_DEVICE_CHANGED 0x0003 +#define PNPEV_CONFIG_CHANGED_FAILED 0x0004 +#define PNPEV_UNKNOWN_SYSTEM_EVENT 0xffff +/* 0x8000 through 0xfffe are OEM defined */ + +/* + * Messages that should be sent through "send message" + */ +#define PNPMSG_OK 0x00 +#define PNPMSG_ABORT 0x01 +#define PNPMSG_UNDOCK_DEFAULT_ACTION 0x40 +#define PNPMSG_POWER_OFF 0x41 +#define PNPMSG_PNP_OS_ACTIVE 0x42 +#define PNPMSG_PNP_OS_INACTIVE 0x43 +/* 0x8000 through 0xffff are OEM defined */ + +#pragma pack(1) +struct pnp_dev_node_info { + __u16 no_nodes; + __u16 max_node_size; +}; +struct pnp_docking_station_info { + __u32 location_id; + __u32 serial; + __u16 capabilities; +}; +struct pnp_isa_config_struc { + __u8 revision; + __u8 no_csns; + __u16 isa_rd_data_port; + __u16 reserved; +}; +struct escd_info_struc { + __u16 min_escd_write_size; + __u16 escd_size; + __u32 nv_storage_base; +}; +struct pnp_bios_node { + __u16 size; + __u8 handle; + __u32 eisa_id; + __u8 type_code[3]; + __u16 flags; + __u8 data[0]; +}; +#pragma pack() + +struct pnpbios_device_id +{ + char id[8]; + unsigned long driver_data; +}; + +struct pnpbios_driver { + struct list_head node; + char *name; + const struct pnpbios_device_id *id_table; /* NULL if wants all devices */ + int (*probe) (struct pci_dev *dev, const struct pnpbios_device_id *id); /* New device inserted */ + void (*remove) (struct pci_dev *dev); /* Device removed, either due to hotplug remove or module remove */ +}; + +#ifdef CONFIG_PNPBIOS + +/* exported */ +extern int pnpbios_register_driver(struct pnpbios_driver *drv); +extern void pnpbios_unregister_driver(struct pnpbios_driver *drv); + +/* non-exported */ +#define pnpbios_for_each_dev(dev) \ + for(dev = pnpbios_dev_g(pnpbios_devices.next); dev != pnpbios_dev_g(&pnpbios_devices); dev = pnpbios_dev_g(dev->global_list.next)) + + +#define pnpbios_dev_g(n) list_entry(n, struct pci_dev, global_list) + +static __inline struct pnpbios_driver *pnpbios_dev_driver(const struct pci_dev *dev) +{ + return (struct pnpbios_driver *)dev->driver; +} + +extern int pnpbios_dont_use_current_config; +extern void *pnpbios_kmalloc(size_t size, int f); +extern int pnpbios_init (void); +extern int 
pnpbios_proc_init (void); +extern void pnpbios_proc_exit (void); + +extern int pnp_bios_dev_node_info (struct pnp_dev_node_info *data); +extern int pnp_bios_get_dev_node (u8 *nodenum, char config, struct pnp_bios_node *data); +extern int pnp_bios_set_dev_node (u8 nodenum, char config, struct pnp_bios_node *data); +extern int pnp_bios_get_stat_res (char *info); +extern int pnp_bios_isapnp_config (struct pnp_isa_config_struc *data); +extern int pnp_bios_escd_info (struct escd_info_struc *data); +extern int pnp_bios_read_escd (char *data, u32 nvram_base); +#if needed +extern int pnp_bios_get_event (u16 *message); +extern int pnp_bios_send_message (u16 message); +extern int pnp_bios_set_stat_res (char *info); +extern int pnp_bios_apm_id_table (char *table, u16 *size); +extern int pnp_bios_write_escd (char *data, u32 nvram_base); +#endif + +/* + * a helper function which helps ensure correct pnpbios_driver + * setup and cleanup for commonly-encountered hotplug/modular cases + * + * This MUST stay in a header, as it checks for -DMODULE + */ + +static inline int pnpbios_module_init(struct pnpbios_driver *drv) +{ + int rc = pnpbios_register_driver (drv); + + if (rc > 0) + return 0; + + /* iff CONFIG_HOTPLUG and built into kernel, we should + * leave the driver around for future hotplug events. + * For the module case, a hotplug daemon of some sort + * should load a module in response to an insert event. */ +#if defined(CONFIG_HOTPLUG) && !defined(MODULE) + if (rc == 0) + return 0; +#else + if (rc == 0) + rc = -ENODEV; +#endif + + /* if we get here, we need to clean up pci driver instance + * and return some sort of error */ + pnpbios_unregister_driver (drv); + + return rc; +} + +#else /* CONFIG_PNPBIOS */ + +static __inline__ int pnpbios_register_driver(struct pnpbios_driver *drv) +{ + return 0; +} + +static __inline__ void pnpbios_unregister_driver(struct pnpbios_driver *drv) +{ + return; +} + +#endif /* CONFIG_PNPBIOS */ +#endif /* __KERNEL__ */ + +#endif /* _LINUX_PNPBIOS_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/quotacompat.h linux.21pre4-ac2/include/linux/quotacompat.h --- linux.21pre4/include/linux/quotacompat.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/quotacompat.h 2003-02-01 16:51:26.000000000 +0000 @@ -0,0 +1,86 @@ +/* + * Definition of symbols used for backward compatible interface + */ + +#ifndef _LINUX_QUOTACOMPAT_ +#define _LINUX_QUOTACOMPAT_ + +#include +#include + +struct v1c_mem_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* maximum # allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive files */ +}; + +struct v1c_dqstats { + __u32 lookups; + __u32 drops; + __u32 reads; + __u32 writes; + __u32 cache_hits; + __u32 allocated_dquots; + __u32 free_dquots; + __u32 syncs; +}; + +struct v2c_mem_dqblk { + unsigned int dqb_ihardlimit; + unsigned int dqb_isoftlimit; + unsigned int dqb_curinodes; + unsigned int dqb_bhardlimit; + unsigned int dqb_bsoftlimit; + qsize_t dqb_curspace; + __kernel_time_t dqb_btime; + __kernel_time_t dqb_itime; +}; + +struct v2c_mem_dqinfo { + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + unsigned int dqi_flags; + unsigned int dqi_blocks; + 
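To show how a consumer of this interface might look, here is a skeletal driver for a PnP BIOS device node; the myserial_* names are invented, PNP0501 is used simply as a familiar EISA id for a 16550-class UART, and the empty-string terminator of the id table is an assumption made by analogy with PCI id tables.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pnpbios.h>

static const struct pnpbios_device_id myserial_ids[] = {
	{ "PNP0501", 0 },
	{ "" }				/* terminator (assumed convention) */
};

static int myserial_probe(struct pci_dev *dev, const struct pnpbios_device_id *id)
{
	/* The PnP BIOS layer hands resources over in the pci_dev; a real
	 * driver would claim the I/O ports and IRQ here. */
	printk(KERN_INFO "myserial: found %s\n", id->id);
	return 0;
}

static void myserial_remove(struct pci_dev *dev)
{
	/* release whatever probe claimed */
}

static struct pnpbios_driver myserial_driver = {
	.name		= "myserial",
	.id_table	= myserial_ids,
	.probe		= myserial_probe,
	.remove		= myserial_remove,
};

static int __init myserial_init(void)
{
	/* pnpbios_module_init() sorts out the built-in vs. modular hotplug
	 * cases as described in the header above. */
	return pnpbios_module_init(&myserial_driver);
}

static void __exit myserial_exit(void)
{
	pnpbios_unregister_driver(&myserial_driver);
}

module_init(myserial_init);
module_exit(myserial_exit);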
unsigned int dqi_free_blk; + unsigned int dqi_free_entry; +}; + +struct v2c_dqstats { + __u32 lookups; + __u32 drops; + __u32 reads; + __u32 writes; + __u32 cache_hits; + __u32 allocated_dquots; + __u32 free_dquots; + __u32 syncs; + __u32 version; +}; + +#define Q_COMP_QUOTAON 0x0100 /* enable quotas */ +#define Q_COMP_QUOTAOFF 0x0200 /* disable quotas */ +#define Q_COMP_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ + +#define Q_V1_GETQUOTA 0x0300 /* get limits and usage */ +#define Q_V1_SETQUOTA 0x0400 /* set limits and usage */ +#define Q_V1_SETUSE 0x0500 /* set usage */ +#define Q_V1_SETQLIM 0x0700 /* set limits */ +#define Q_V1_GETSTATS 0x0800 /* get collected stats */ +#define Q_V1_RSQUASH 0x1000 /* set root_squash option */ + +#define Q_V2_SETQLIM 0x0700 /* set limits */ +#define Q_V2_GETINFO 0x0900 /* get info about quotas - graces, flags... */ +#define Q_V2_SETINFO 0x0A00 /* set info about quotas */ +#define Q_V2_SETGRACE 0x0B00 /* set inode and block grace */ +#define Q_V2_SETFLAGS 0x0C00 /* set flags for quota */ +#define Q_V2_GETQUOTA 0x0D00 /* get limits and usage */ +#define Q_V2_SETQUOTA 0x0E00 /* set limits and usage */ +#define Q_V2_SETUSE 0x0F00 /* set usage */ +#define Q_V2_GETSTATS 0x1100 /* get collected stats */ + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/quota.h linux.21pre4-ac2/include/linux/quota.h --- linux.21pre4/include/linux/quota.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/quota.h 2003-02-01 16:51:26.000000000 +0000 @@ -40,30 +40,22 @@ #define _LINUX_QUOTA_ #include +#include -/* - * Convert diskblocks to blocks and the other way around. - */ -#define dbtob(num) (num << BLOCK_SIZE_BITS) -#define btodb(num) (num >> BLOCK_SIZE_BITS) +#define __DQUOT_VERSION__ "dquot_6.5.1" +#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 -/* - * Convert count of filesystem blocks to diskquota blocks, meant - * for filesystems where i_blksize != BLOCK_SIZE - */ -#define fs_to_dq_blocks(num, blksize) (((num) * (blksize)) / BLOCK_SIZE) +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef __u64 qsize_t; /* Type in which we store sizes */ -/* - * Definitions for disk quotas imposed on the average user - * (big brother finally hits Linux). - * - * The following constants define the amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). The timer is started when the user crosses - * their soft limit, it is reset when they go below their soft limit. - */ -#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ -#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ +/* Size of blocks in which are counted size limits */ +#define QUOTABLOCK_BITS 10 +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) + +/* Conversion routines from and to quota blocks */ +#define qb2kb(x) ((x) << (QUOTABLOCK_BITS-10)) +#define kb2qb(x) ((x) >> (QUOTABLOCK_BITS-10)) +#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) #define MAXQUOTAS 2 #define USRQUOTA 0 /* element used for user quotas */ @@ -78,9 +70,6 @@ "undefined", \ }; -#define QUOTAFILENAME "quota" -#define QUOTAGROUP "staff" - /* * Command definitions for the 'quotactl' system call. 
* The commands are broken into a main command defined below @@ -91,61 +80,122 @@ #define SUBCMDSHIFT 8 #define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK)) -#define Q_QUOTAON 0x0100 /* enable quotas */ -#define Q_QUOTAOFF 0x0200 /* disable quotas */ -#define Q_GETQUOTA 0x0300 /* get limits and usage */ -#define Q_SETQUOTA 0x0400 /* set limits and usage */ -#define Q_SETUSE 0x0500 /* set usage */ -#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ -#define Q_SETQLIM 0x0700 /* set limits */ -#define Q_GETSTATS 0x0800 /* get collected stats */ -#define Q_RSQUASH 0x1000 /* set root_squash option */ - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is an array of these structures - * indexed by user or group number. +#define Q_SYNC 0x800001 /* sync disk copy of a filesystems quotas */ +#define Q_QUOTAON 0x800002 /* turn quotas on */ +#define Q_QUOTAOFF 0x800003 /* turn quotas off */ +#define Q_GETFMT 0x800004 /* get quota format used on given filesystem */ +#define Q_GETINFO 0x800005 /* get information about quota files */ +#define Q_SETINFO 0x800006 /* set information about quota files */ +#define Q_GETQUOTA 0x800007 /* get user quota structure */ +#define Q_SETQUOTA 0x800008 /* set user quota structure */ + +/* + * Quota structure used for communication with userspace via quotactl + * Following flags are used to specify which fields are valid */ -struct dqblk { +#define QIF_BLIMITS 1 +#define QIF_SPACE 2 +#define QIF_ILIMITS 4 +#define QIF_INODES 8 +#define QIF_BTIME 16 +#define QIF_ITIME 32 +#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) +#define QIF_USAGE (QIF_SPACE | QIF_INODES) +#define QIF_TIMES (QIF_BTIME | QIF_ITIME) +#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) + +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +/* + * Structure used for setting quota information about file via quotactl + * Following flags are used to specify which fields are valid + */ +#define IIF_BGRACE 1 +#define IIF_IGRACE 2 +#define IIF_FLAGS 4 +#define IIF_ALL (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS) + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 dqi_valid; +}; + +#ifdef __KERNEL__ + +#include +#include +#include + +/* + * Data for one user/group kept in memory + */ +struct mem_dqblk { __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ - __u32 dqb_curblocks; /* current block count */ + qsize_t dqb_curspace; /* current used space */ __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ __u32 dqb_isoftlimit; /* preferred inode limit */ __u32 dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ }; /* - * Shorthand notation. 
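Userspace reaches the new commands through the same quotactl(2) entry point; a small sketch that reads the caller's own usage follows. The device path is an example only, and the running kernel must of course implement this reworked interface; limits are reported in 1 KiB quota blocks while dqb_curspace is in bytes.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <linux/quota.h>	/* Q_GETQUOTA, QCMD(), struct if_dqblk, quotactl prototype */

int main(int argc, char **argv)
{
	struct if_dqblk q;
	const char *dev = argc > 1 ? argv[1] : "/dev/hda1";	/* example device */

	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), dev, getuid(),
		     (caddr_t)&q) != 0) {
		perror("quotactl");
		return 1;
	}

	/* dqb_valid reports which QIF_* groups of fields the filesystem filled in. */
	if (q.dqb_valid & QIF_USAGE)
		printf("uid %u uses %llu bytes and %llu inodes\n",
		       (unsigned int)getuid(),
		       (unsigned long long)q.dqb_curspace,
		       (unsigned long long)q.dqb_curinodes);
	return 0;
}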
+ * Data for one quotafile kept in memory */ -#define dq_bhardlimit dq_dqb.dqb_bhardlimit -#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit -#define dq_curblocks dq_dqb.dqb_curblocks -#define dq_ihardlimit dq_dqb.dqb_ihardlimit -#define dq_isoftlimit dq_dqb.dqb_isoftlimit -#define dq_curinodes dq_dqb.dqb_curinodes -#define dq_btime dq_dqb.dqb_btime -#define dq_itime dq_dqb.dqb_itime +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + union { + struct v1_mem_dqinfo v1_i; + struct v2_mem_dqinfo v2_i; + } u; +}; + +#define DQF_MASK 0xffff /* Mask for format specific flags */ +#define DQF_INFO_DIRTY 0x10000 /* Is info dirty? */ +#define DQF_ANY_DQUOT_DIRTY 0x20000 /* Is any dquot dirty? */ -#define dqoff(UID) ((loff_t)((UID) * sizeof (struct dqblk))) +extern inline void mark_info_dirty(struct mem_dqinfo *info) +{ + info->dqi_flags |= DQF_INFO_DIRTY; +} + +#define info_dirty(info) ((info)->dqi_flags & DQF_INFO_DIRTY) + +#define info_any_dirty(info) ((info)->dqi_flags & DQF_INFO_DIRTY ||\ + (info)->dqi_flags & DQF_ANY_DQUOT_DIRTY) + +#define sb_dqopt(sb) (&(sb)->s_dquot) struct dqstats { - __u32 lookups; - __u32 drops; - __u32 reads; - __u32 writes; - __u32 cache_hits; - __u32 allocated_dquots; - __u32 free_dquots; - __u32 syncs; + int lookups; + int drops; + int reads; + int writes; + int cache_hits; + int allocated_dquots; + int free_dquots; + int syncs; }; -#ifdef __KERNEL__ - -extern int nr_dquots, nr_free_dquots; -extern int dquot_root_squash; +extern struct dqstats dqstats; #define NR_DQHASH 43 /* Just an arbitrary number */ @@ -162,37 +212,113 @@ struct list_head dq_free; /* Free list element */ wait_queue_head_t dq_wait_lock; /* Pointer to waitqueue on dquot lock */ wait_queue_head_t dq_wait_free; /* Pointer to waitqueue for quota to be unused */ - int dq_count; /* Reference count */ + int dq_count; /* Use count */ + int dq_dup_ref; /* Number of duplicated refences */ /* fields after this point are cleared when invalidating */ struct super_block *dq_sb; /* superblock this applies to */ unsigned int dq_id; /* ID this applies to (uid, gid) */ kdev_t dq_dev; /* Device this applies to */ + loff_t dq_off; /* Offset of dquot on disk */ short dq_type; /* Type of quota */ short dq_flags; /* See DQ_* */ unsigned long dq_referenced; /* Number of times this dquot was referenced during its lifetime */ - struct dqblk dq_dqb; /* Diskquota usage */ + struct mem_dqblk dq_dqb; /* Diskquota usage */ }; #define NODQUOT (struct dquot *)NULL -/* - * Flags used for set_dqblk. 
- */ -#define SET_QUOTA 0x02 -#define SET_USE 0x04 -#define SET_QLIMIT 0x08 - #define QUOTA_OK 0 #define NO_QUOTA 1 +/* Operations which must be implemented by each quota format */ +struct quota_format_ops { + int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */ + int (*read_file_info)(struct super_block *sb, int type); /* Read main info about file - called on quotaon() */ + int (*write_file_info)(struct super_block *sb, int type); /* Write main info about file */ + int (*free_file_info)(struct super_block *sb, int type); /* Called on quotaoff() */ + int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */ + int (*commit_dqblk)(struct dquot *dquot); /* Write (or delete) structure for one user */ +}; + +/* Operations working with dquots */ +struct dquot_operations { + void (*initialize) (struct inode *, int); + void (*drop) (struct inode *); + int (*alloc_space) (struct inode *, qsize_t, int); + int (*alloc_inode) (const struct inode *, unsigned long); + void (*free_space) (struct inode *, qsize_t); + void (*free_inode) (const struct inode *, unsigned long); + int (*transfer) (struct inode *, struct iattr *); +}; + +/* Operations handling requests from userspace */ +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, char *); + int (*quota_off)(struct super_block *, int); + int (*quota_sync)(struct super_block *, int); + int (*get_info)(struct super_block *, int, struct if_dqinfo *); + int (*set_info)(struct super_block *, int, struct if_dqinfo *); + int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); + int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); + int (*get_xstate)(struct super_block *, struct fs_quota_stat *); + int (*set_xstate)(struct super_block *, unsigned int, int); + int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *); + int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *); +}; + +struct quota_format_type { + int qf_fmt_id; /* Quota format id */ + struct quota_format_ops *qf_ops; /* Operations of format */ + struct module *qf_owner; /* Module implementing quota format */ + struct quota_format_type *qf_next; +}; + +#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ +#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ + +struct quota_info { + unsigned int flags; /* Flags for diskquotas on this device */ + struct semaphore dqio_sem; /* lock device while I/O in progress */ + struct semaphore dqoff_sem; /* serialize quota_off() and quota_on() on device */ + struct file *files[MAXQUOTAS]; /* fp's to quotafiles */ + struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ + struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ +}; + +/* Inline would be better but we need to dereference super_block which is not defined yet */ +#define mark_dquot_dirty(dquot) do {\ + dquot->dq_flags |= DQ_MOD;\ + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_flags |= DQF_ANY_DQUOT_DIRTY;\ +} while (0) + +#define dquot_dirty(dquot) ((dquot)->dq_flags & DQ_MOD) + +static inline int is_enabled(struct quota_info *dqopt, int type) +{ + switch (type) { + case USRQUOTA: + return dqopt->flags & DQUOT_USR_ENABLED; + case GRPQUOTA: + return dqopt->flags & DQUOT_GRP_ENABLED; + } + return 0; +} + +#define sb_any_quota_enabled(sb) (is_enabled(sb_dqopt(sb), USRQUOTA) | is_enabled(sb_dqopt(sb), GRPQUOTA)) + +#define sb_has_quota_enabled(sb, type) (is_enabled(sb_dqopt(sb), type)) + +int 
register_quota_format(struct quota_format_type *fmt); +void unregister_quota_format(struct quota_format_type *fmt); + #else # /* nodep */ include __BEGIN_DECLS -long quotactl __P ((int, const char *, int, caddr_t)); +long quotactl __P ((unsigned int, const char *, int, caddr_t)); __END_DECLS #endif /* __KERNEL__ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/quotaio_v1.h linux.21pre4-ac2/include/linux/quotaio_v1.h --- linux.21pre4/include/linux/quotaio_v1.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/quotaio_v1.h 2003-02-01 16:51:26.000000000 +0000 @@ -0,0 +1,33 @@ +#ifndef _LINUX_QUOTAIO_V1_H +#define _LINUX_QUOTAIO_V1_H + +#include + +/* + * The following constants define the amount of time given a user + * before the soft limits are treated as hard limits (usually resulting + * in an allocation failure). The timer is started when the user crosses + * their soft limit, it is reset when they go below their soft limit. + */ +#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ +#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is an array of these structures + * indexed by user or group number. + */ +struct v1_disk_dqblk { + __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ + __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + __u32 dqb_curblocks; /* current block count */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive inode use */ +}; + +#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) + +#endif /* _LINUX_QUOTAIO_V1_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/quotaio_v2.h linux.21pre4-ac2/include/linux/quotaio_v2.h --- linux.21pre4/include/linux/quotaio_v2.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/quotaio_v2.h 2003-02-01 16:51:26.000000000 +0000 @@ -0,0 +1,79 @@ +/* + * Definitions of structures for vfsv0 quota format + */ + +#ifndef _LINUX_QUOTAIO_V2_H +#define _LINUX_QUOTAIO_V2_H + +#include +#include + +/* + * Definitions of magics and versions of current quota files + */ +#define V2_INITQMAGICS {\ + 0xd9c01f11, /* USRQUOTA */\ + 0xd9c01927 /* GRPQUOTA */\ +} + +#define V2_INITQVERSIONS {\ + 0, /* USRQUOTA */\ + 0 /* GRPQUOTA */\ +} + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is a radix tree whose leaves point + * to blocks of these structures. 
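/*
 * Illustrative sketch, not part of the patch: the shape of a quota format
 * driver hooking into register_quota_format() declared above.  Every
 * "example_*" name, the stub bodies and the format id are hypothetical;
 * the v1/v2 formats added by this patch are the real users of the
 * interface.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/quota.h>

static int example_check_quota_file(struct super_block *sb, int type) { return 0; }
static int example_read_file_info(struct super_block *sb, int type) { return 0; }
static int example_write_file_info(struct super_block *sb, int type) { return 0; }
static int example_free_file_info(struct super_block *sb, int type) { return 0; }
static int example_read_dqblk(struct dquot *dquot) { return 0; }
static int example_commit_dqblk(struct dquot *dquot) { return 0; }

static struct quota_format_ops example_format_ops = {
	.check_quota_file	= example_check_quota_file,
	.read_file_info		= example_read_file_info,
	.write_file_info	= example_write_file_info,
	.free_file_info		= example_free_file_info,
	.read_dqblk		= example_read_dqblk,
	.commit_dqblk		= example_commit_dqblk,
};

static struct quota_format_type example_format = {
	.qf_fmt_id	= 3,			/* hypothetical id */
	.qf_ops		= &example_format_ops,
	.qf_owner	= THIS_MODULE,
};

static int __init example_quota_init(void)
{
	return register_quota_format(&example_format);
}

static void __exit example_quota_exit(void)
{
	unregister_quota_format(&example_format);
}

module_init(example_quota_init);
module_exit(example_quota_exit);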
+ */ +struct v2_disk_dqblk { + __u32 dqb_id; /* id this quota applies to */ + __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ + __u32 dqb_isoftlimit; /* preferred inode limit */ + __u32 dqb_curinodes; /* current # allocated inodes */ + __u32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ + __u32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ + __u64 dqb_curspace; /* current space occupied (in bytes) */ + __u64 dqb_btime; /* time limit for excessive disk use */ + __u64 dqb_itime; /* time limit for excessive inode use */ +}; + +/* + * Here are header structures as written on disk and their in-memory copies + */ +/* First generic header */ +struct v2_disk_dqheader { + __u32 dqh_magic; /* Magic number identifying file */ + __u32 dqh_version; /* File version */ +}; + +/* Header with type and version specific information */ +struct v2_disk_dqinfo { + __u32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ + __u32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ + __u32 dqi_flags; /* Flags for quotafile (DQF_*) */ + __u32 dqi_blocks; /* Number of blocks in file */ + __u32 dqi_free_blk; /* Number of first free block in the list */ + __u32 dqi_free_entry; /* Number of block with at least one free entry */ +}; + +/* + * Structure of header of block with quota structures. It is padded to 16 bytes so + * there will be space for exactly 18 quota-entries in a block + */ +struct v2_disk_dqdbheader { + __u32 dqdh_next_free; /* Number of next block with free entry */ + __u32 dqdh_prev_free; /* Number of previous block with free entry */ + __u16 dqdh_entries; /* Number of valid entries in block */ + __u16 dqdh_pad1; + __u32 dqdh_pad2; +}; + +#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ +#define V2_DQBLKSIZE_BITS 10 +#define V2_DQBLKSIZE (1 << V2_DQBLKSIZE_BITS) /* Size of block with quota structures */ +#define V2_DQTREEOFF 1 /* Offset of tree in file in blocks */ +#define V2_DQTREEDEPTH 4 /* Depth of quota tree */ +#define V2_DQSTRINBLK ((V2_DQBLKSIZE - sizeof(struct v2_disk_dqdbheader)) / sizeof(struct v2_disk_dqblk)) /* Number of entries in one blocks */ + +#endif /* _LINUX_QUOTAIO_V2_H */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/quotaops.h linux.21pre4-ac2/include/linux/quotaops.h --- linux.21pre4/include/linux/quotaops.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/quotaops.h 2003-02-04 15:54:23.000000000 +0000 @@ -20,15 +20,16 @@ /* * declaration of quota_function calls in kernel. 
*/ -extern void dquot_initialize(struct inode *inode, short type); +extern void sync_dquots_dev(kdev_t dev, int type); +extern void sync_dquots_sb(struct super_block *sb, int type); + +extern void dquot_initialize(struct inode *inode, int type); extern void dquot_drop(struct inode *inode); -extern int quota_off(struct super_block *sb, short type); -extern int sync_dquots(kdev_t dev, short type); -extern int dquot_alloc_block(struct inode *inode, unsigned long number, char prealloc); +extern int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); extern int dquot_alloc_inode(const struct inode *inode, unsigned long number); -extern void dquot_free_block(struct inode *inode, unsigned long number); +extern void dquot_free_space(struct inode *inode, qsize_t number); extern void dquot_free_inode(const struct inode *inode, unsigned long number); extern int dquot_transfer(struct inode *inode, struct iattr *iattr); @@ -36,11 +37,15 @@ /* * Operations supported for diskquotas. */ -#define sb_any_quota_enabled(sb) ((sb)->s_dquot.flags & (DQUOT_USR_ENABLED | DQUOT_GRP_ENABLED)) +extern struct dquot_operations dquot_operations; +extern struct quotactl_ops vfs_quotactl_ops; + +#define sb_dquot_ops (&dquot_operations) +#define sb_quotactl_ops (&vfs_quotactl_ops) static __inline__ void DQUOT_INIT(struct inode *inode) { - if (!inode->i_sb) + if (!inode->i_sb) out_of_line_bug(); lock_kernel(); if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) @@ -59,50 +64,50 @@ unlock_kernel(); } -static __inline__ int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); if (sb_any_quota_enabled(inode->i_sb)) { - /* Number of used blocks is updated in alloc_block() */ - if (inode->i_sb->dq_op->alloc_block(inode, fs_to_dq_blocks(nr, inode->i_sb->s_blocksize), 1) == NO_QUOTA) { + /* Used space is updated in alloc_space() */ + if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) { unlock_kernel(); return 1; } } else - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -static __inline__ int DQUOT_PREALLOC_BLOCK(struct inode *inode, int nr) +static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr))) + if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) mark_inode_dirty(inode); return ret; } -static __inline__ int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); if (sb_any_quota_enabled(inode->i_sb)) { - /* Number of used blocks is updated in alloc_block() */ - if (inode->i_sb->dq_op->alloc_block(inode, fs_to_dq_blocks(nr, inode->i_sb->s_blocksize), 0) == NO_QUOTA) { + /* Used space is updated in alloc_space() */ + if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) { unlock_kernel(); return 1; } } else - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -static __inline__ int DQUOT_ALLOC_BLOCK(struct inode *inode, int nr) +static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr))) + if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) mark_inode_dirty(inode); return ret; } @@ -121,19 +126,19 @@ return 0; } -static __inline__ void DQUOT_FREE_BLOCK_NODIRTY(struct inode 
*inode, int nr) +static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); if (sb_any_quota_enabled(inode->i_sb)) - inode->i_sb->dq_op->free_block(inode, fs_to_dq_blocks(nr, inode->i_sb->s_blocksize)); + inode->i_sb->dq_op->free_space(inode, nr); else - inode->i_blocks -= nr << (inode->i_sb->s_blocksize_bits - 9); + inode_sub_bytes(inode, nr); unlock_kernel(); } -static __inline__ void DQUOT_FREE_BLOCK(struct inode *inode, int nr) +static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_FREE_BLOCK_NODIRTY(inode, nr); + DQUOT_FREE_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); } @@ -159,63 +164,85 @@ return 0; } -#define DQUOT_SYNC(dev) sync_dquots(dev, -1) -#define DQUOT_OFF(sb) quota_off(sb, -1) +#define DQUOT_SYNC_DEV(dev) sync_dquots_dev(dev, -1) +#define DQUOT_SYNC_SB(sb) sync_dquots_sb(sb, -1) + +static __inline__ int DQUOT_OFF(struct super_block *sb) +{ + int ret = -ENOSYS; + + lock_kernel(); + if (sb->s_qcop && sb->s_qcop->quota_off) + ret = sb->s_qcop->quota_off(sb, -1); + unlock_kernel(); + return ret; +} #else /* * NO-OP when quota not configured. */ +#define sb_dquot_ops (NULL) +#define sb_quotactl_ops (NULL) #define DQUOT_INIT(inode) do { } while(0) #define DQUOT_DROP(inode) do { } while(0) #define DQUOT_ALLOC_INODE(inode) (0) #define DQUOT_FREE_INODE(inode) do { } while(0) -#define DQUOT_SYNC(dev) do { } while(0) +#define DQUOT_SYNC_DEV(dev) do { } while(0) +#define DQUOT_SYNC_SB(sb) do { } while(0) #define DQUOT_OFF(sb) do { } while(0) #define DQUOT_TRANSFER(inode, iattr) (0) -extern __inline__ int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +extern __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -extern __inline__ int DQUOT_PREALLOC_BLOCK(struct inode *inode, int nr) +extern __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr); + DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); return 0; } -extern __inline__ int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, int nr) +extern __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); - inode->i_blocks += nr << (inode->i_sb->s_blocksize_bits - 9); + inode_add_bytes(inode, nr); unlock_kernel(); return 0; } -extern __inline__ int DQUOT_ALLOC_BLOCK(struct inode *inode, int nr) +extern __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr); + DQUOT_ALLOC_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); return 0; } -extern __inline__ void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, int nr) +extern __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { lock_kernel(); - inode->i_blocks -= nr << (inode->i_sb->s_blocksize_bits - 9); + inode_sub_bytes(inode, nr); unlock_kernel(); } -extern __inline__ void DQUOT_FREE_BLOCK(struct inode *inode, int nr) +extern __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) { - DQUOT_FREE_BLOCK_NODIRTY(inode, nr); + DQUOT_FREE_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); } #endif /* CONFIG_QUOTA */ + +#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_PREALLOC_BLOCK(inode, nr) DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << 
(inode)->i_sb->s_blocksize_bits) +#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) + #endif /* _LINUX_QUOTAOPS_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/rtc.h linux.21pre4-ac2/include/linux/rtc.h --- linux.21pre4/include/linux/rtc.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/rtc.h 2003-01-28 16:35:25.000000000 +0000 @@ -39,10 +39,32 @@ struct rtc_time time; /* time the alarm is set to */ }; +/* + * Data structure to control PLL correction some better RTC feature + * pll_value is used to get or set current value of correction, + * the rest of the struct is used to query HW capabilities. + * This is modeled after the RTC used in Q40/Q60 computers but + * should be sufficiently flexible for other devices + * + * +ve pll_value means clock will run faster by + * pll_value*pll_posmult/pll_clock + * -ve pll_value means clock will run slower by + * pll_value*pll_negmult/pll_clock + */ + +struct rtc_pll_info { + int pll_ctrl; /* placeholder for fancier control */ + int pll_value; /* get/set correction value */ + int pll_max; /* max +ve (faster) adjustment value */ + int pll_min; /* max -ve (slower) adjustment value */ + int pll_posmult; /* factor for +ve corection */ + int pll_negmult; /* factor for -ve corection */ + long pll_clock; /* base PLL frequency */ +}; /* - * ioctl calls that are permitted to the /dev/rtc interface, if - * CONFIG_RTC/CONFIG_EFI_RTC was enabled. + * ioctl calls that are permitted to the /dev/rtc interface, if + * any of the RTC drivers are enabled. */ #define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */ @@ -66,4 +88,6 @@ #define RTC_WKALM_SET _IOW('p', 0x0f, struct rtc_wkalrm)/* Set wakeup alarm*/ #define RTC_WKALM_RD _IOR('p', 0x10, struct rtc_wkalrm)/* Get wakeup alarm*/ +#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ +#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ #endif /* _LINUX_RTC_H_ */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/sched.h linux.21pre4-ac2/include/linux/sched.h --- linux.21pre4/include/linux/sched.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/sched.h 2003-02-04 15:54:22.000000000 +0000 @@ -73,16 +73,16 @@ #define CT_TO_SECS(x) ((x) / HZ) #define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ) -extern int nr_running, nr_threads; +extern int nr_threads; extern int last_pid; +extern unsigned long nr_running(void); +extern unsigned long nr_uninterruptible(void); #include #include #include #include -#ifdef __KERNEL__ #include -#endif #include @@ -119,12 +119,6 @@ #define SCHED_FIFO 1 #define SCHED_RR 2 -/* - * This is an additional bit set when we want to - * yield the CPU for one re-schedule.. 
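/*
 * Illustrative sketch, not part of the patch: querying the new PLL
 * correction ioctl added above from user space.  Only RTC drivers that
 * implement RTC_PLL_GET (such as the Q40/Q60 RTC this was modeled on)
 * will accept it; the device path and wording are just examples.
 */
#include <linux/rtc.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int show_rtc_pll(const char *dev)
{
	struct rtc_pll_info pll;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return -1;
	}
	if (ioctl(fd, RTC_PLL_GET, &pll) < 0) {
		perror("RTC_PLL_GET");
		close(fd);
		return -1;
	}
	printf("correction %d (range %d..%d), +mult %d, -mult %d, clock %ld\n",
	       pll.pll_value, pll.pll_min, pll.pll_max,
	       pll.pll_posmult, pll.pll_negmult, pll.pll_clock);
	close(fd);
	return 0;
}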
- */ -#define SCHED_YIELD 0x10 - struct sched_param { int sched_priority; }; @@ -145,14 +139,21 @@ extern spinlock_t runqueue_lock; extern spinlock_t mmlist_lock; +typedef struct task_struct task_t; + extern void sched_init(void); -extern void init_idle(void); +extern void init_idle(task_t *idle, int cpu); +extern int idle_cpu(int cpu); extern void show_state(void); extern void cpu_init (void); extern void trap_init(void); extern void update_process_times(int user); -extern void update_one_process(struct task_struct *p, unsigned long user, +extern void update_one_process(task_t *p, unsigned long user, unsigned long system, int cpu); +extern void scheduler_tick(int user_tick, int system); +extern void migration_init(void); +extern unsigned long cache_decay_ticks; +extern int set_user(uid_t new_ruid, int dumpclear); #define MAX_SCHEDULE_TIMEOUT LONG_MAX extern signed long FASTCALL(schedule_timeout(signed long timeout)); @@ -164,6 +165,36 @@ extern int current_is_keventd(void); /* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT + * priority is 0..MAX_RT_PRIO-1, and SCHED_OTHER tasks are + * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values + * are inverted: lower p->prio value means higher priority. + * + * The MAX_RT_USER_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. + */ + +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO + +#define MAX_PRIO (MAX_RT_PRIO + 40) + +/* + * The maximum RT priority is configurable. If the resulting + * bitmap is 160-bits , we can use a hand-coded routine which + * is optimal. Otherwise, we fall back on a generic routine for + * finding the first set bit from an arbitrarily-sized bitmap. + */ +#if MAX_PRIO < 160 && MAX_PRIO > 127 +#define sched_find_first_bit(map) _sched_find_first_bit(map) +#else +#define sched_find_first_bit(map) find_first_bit(map, MAX_PRIO) +#endif + +/* * The default fd array needs to be at least BITS_PER_LONG, * as this is the granularity returned by copy_fdset(). */ @@ -284,6 +315,8 @@ extern struct user_struct root_user; #define INIT_USER (&root_user) +typedef struct prio_array prio_array_t; + struct task_struct { /* * offsets of these are hardcoded elsewhere - touch with care @@ -301,34 +334,25 @@ int lock_depth; /* Lock depth */ -/* - * offset 32 begins here on 32-bit platforms. We keep - * all fields in a single cacheline that are needed for - * the goodness() loop in schedule(). - */ - long counter; - long nice; - unsigned long policy; - struct mm_struct *mm; - int processor; /* - * cpus_runnable is ~0 if the process is not running on any - * CPU. It's (1 << cpu) if it's running on a CPU. This mask - * is updated under the runqueue lock. - * - * To determine whether a process might run on a CPU, this - * mask is AND-ed with cpus_allowed. - */ - unsigned long cpus_runnable, cpus_allowed; - /* - * (only the 'next' pointer fits into the cacheline, but - * that's just fine.) + * offset 32 begins here on 32-bit platforms. 
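/*
 * Editorial note, not part of the patch, working through the defaults
 * above: MAX_USER_RT_PRIO = 100, so MAX_RT_PRIO = 100 and
 * MAX_PRIO = 100 + 40 = 140.  140 falls inside the 128..159 window
 * tested by the #if, so the hand-coded _sched_find_first_bit() over the
 * priority bitmap is selected; raising MAX_USER_RT_PRIO above 119
 * (MAX_PRIO >= 160) silently falls back to the generic
 * find_first_bit(map, MAX_PRIO).
 */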
*/ + unsigned int cpu; + int prio, static_prio; struct list_head run_list; - unsigned long sleep_time; + prio_array_t *array; + + unsigned long sleep_avg; + unsigned long sleep_timestamp; + + unsigned long policy; + unsigned long cpus_allowed; + unsigned int time_slice; + + task_t *next_task, *prev_task; + + struct mm_struct *mm, *active_mm; - struct task_struct *next_task, *prev_task; - struct mm_struct *active_mm; struct list_head local_pages; unsigned int allocation_order, nr_local_pages; @@ -351,12 +375,12 @@ * older sibling, respectively. (p->father can be replaced with * p->p_pptr->pid) */ - struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; + task_t *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr; struct list_head thread_group; /* PID hash table linkage. */ - struct task_struct *pidhash_next; - struct task_struct **pidhash_pprev; + task_t *pidhash_next; + task_t **pidhash_pprev; wait_queue_head_t wait_chldexit; /* for wait4() */ struct completion *vfork_done; /* for vfork() */ @@ -454,10 +478,16 @@ */ #define _STK_LIM (8*1024*1024) -#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */ -#define MAX_COUNTER (20*HZ/100) -#define DEF_NICE (0) +#if CONFIG_SMP +extern void set_cpus_allowed(task_t *p, unsigned long new_mask); +#else +#define set_cpus_allowed(p, new_mask) do { } while (0) +#endif +extern void set_user_nice(task_t *p, long nice); +extern int task_prio(task_t *p); +extern int task_nice(task_t *p); +extern int idle_cpu(int cpu); extern void yield(void); /* @@ -477,14 +507,14 @@ addr_limit: KERNEL_DS, \ exec_domain: &default_exec_domain, \ lock_depth: -1, \ - counter: DEF_COUNTER, \ - nice: DEF_NICE, \ + prio: MAX_PRIO-20, \ + static_prio: MAX_PRIO-20, \ policy: SCHED_OTHER, \ + cpus_allowed: ~0UL, \ mm: NULL, \ active_mm: &init_mm, \ - cpus_runnable: ~0UL, \ - cpus_allowed: ~0UL, \ run_list: LIST_HEAD_INIT(tsk.run_list), \ + time_slice: HZ, \ next_task: &tsk, \ prev_task: &tsk, \ p_opptr: &tsk, \ @@ -518,24 +548,24 @@ #endif union task_union { - struct task_struct task; + task_t task; unsigned long stack[INIT_TASK_SIZE/sizeof(long)]; }; extern union task_union init_task_union; extern struct mm_struct init_mm; -extern struct task_struct *init_tasks[NR_CPUS]; +extern task_t *init_tasks[NR_CPUS]; /* PID hashing. (shouldnt this be dynamic?) 
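/*
 * Illustrative sketch, not part of the patch: a kernel thread using the
 * affinity and nice helpers declared above.  cpus_allowed is a plain CPU
 * bitmask (it defaults to ~0UL); on UP builds set_cpus_allowed() is a
 * no-op macro.  The function name and values chosen here are arbitrary.
 */
#include <linux/sched.h>

static void example_pin_and_renice(int cpu)
{
	set_cpus_allowed(current, 1UL << cpu);	/* run only on 'cpu' */
	set_user_nice(current, -5);		/* modest priority boost */
}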
*/ #define PIDHASH_SZ (4096 >> 2) -extern struct task_struct *pidhash[PIDHASH_SZ]; +extern task_t *pidhash[PIDHASH_SZ]; #define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1)) -static inline void hash_pid(struct task_struct *p) +static inline void hash_pid(task_t *p) { - struct task_struct **htable = &pidhash[pid_hashfn(p->pid)]; + task_t **htable = &pidhash[pid_hashfn(p->pid)]; if((p->pidhash_next = *htable) != NULL) (*htable)->pidhash_pprev = &p->pidhash_next; @@ -543,16 +573,16 @@ p->pidhash_pprev = htable; } -static inline void unhash_pid(struct task_struct *p) +static inline void unhash_pid(task_t *p) { if(p->pidhash_next) p->pidhash_next->pidhash_pprev = p->pidhash_pprev; *p->pidhash_pprev = p->pidhash_next; } -static inline struct task_struct *find_task_by_pid(int pid) +static inline task_t *find_task_by_pid(int pid) { - struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)]; + task_t *p, **htable = &pidhash[pid_hashfn(pid)]; for(p = *htable; p && p->pid != pid; p = p->pidhash_next) ; @@ -560,19 +590,6 @@ return p; } -#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL) - -static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu) -{ - tsk->processor = cpu; - tsk->cpus_runnable = 1UL << cpu; -} - -static inline void task_release_cpu(struct task_struct *tsk) -{ - tsk->cpus_runnable = ~0UL; -} - /* per-UID process charging. */ extern struct user_struct * alloc_uid(uid_t); extern void free_uid(struct user_struct *); @@ -599,47 +616,51 @@ extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q)); extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout)); -extern int FASTCALL(wake_up_process(struct task_struct * tsk)); +extern int FASTCALL(wake_up_process(task_t * tsk)); +extern void FASTCALL(wake_up_forked_process(task_t * tsk)); +extern void FASTCALL(sched_exit(task_t * p)); #define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) #define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) #define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0) -#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1) -#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) #define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr) #define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0) -#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) -#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr) +#ifdef CONFIG_SMP +#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1) +#else +#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1) +#endif + asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); extern void proc_caches_init(void); -extern void flush_signals(struct task_struct *); -extern void flush_signal_handlers(struct task_struct *); +extern void flush_signals(task_t *); +extern void flush_signal_handlers(task_t *); extern void sig_exit(int, int, struct siginfo *); extern int dequeue_signal(sigset_t *, siginfo_t *); extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); extern void unblock_all_signals(void); -extern int send_sig_info(int, 
struct siginfo *, struct task_struct *); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); +extern int send_sig_info(int, struct siginfo *, task_t *); +extern int force_sig_info(int, struct siginfo *, task_t *); extern int kill_pg_info(int, struct siginfo *, pid_t); extern int kill_sl_info(int, struct siginfo *, pid_t); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern void notify_parent(struct task_struct *, int); -extern void do_notify_parent(struct task_struct *, int); -extern void force_sig(int, struct task_struct *); -extern int send_sig(int, struct task_struct *, int); +extern void notify_parent(task_t *, int); +extern void do_notify_parent(task_t *, int); +extern void force_sig(int, task_t *); +extern int send_sig(int, task_t *, int); extern int kill_pg(pid_t, int, int); extern int kill_sl(pid_t, int, int); extern int kill_proc(pid_t, int, int); extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *); extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); -static inline int signal_pending(struct task_struct *p) +static inline int signal_pending(task_t *p) { return (p->sigpending != 0); } @@ -678,7 +699,7 @@ This is required every time the blocked sigset_t changes. All callers should have t->sigmask_lock. */ -static inline void recalc_sigpending(struct task_struct *t) +static inline void recalc_sigpending(task_t *t) { t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked); } @@ -785,16 +806,17 @@ extern int expand_fdset(struct files_struct *, int nr); extern void free_fdset(fd_set *, int); -extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); +extern int copy_thread(int, unsigned long, unsigned long, unsigned long, task_t *, struct pt_regs *); extern void flush_thread(void); extern void exit_thread(void); -extern void exit_mm(struct task_struct *); -extern void exit_files(struct task_struct *); -extern void exit_sighand(struct task_struct *); +extern void exit_mm(task_t *); +extern void exit_files(task_t *); +extern void exit_sighand(task_t *); extern void reparent_to_init(void); extern void daemonize(void); +extern task_t *child_reaper; extern int do_execve(char *, char **, char **, struct pt_regs *); extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long); @@ -803,6 +825,9 @@ extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); +extern void wait_task_inactive(task_t * p); +extern void kick_if_running(task_t * p); + #define __wait_event(wq, condition) \ do { \ wait_queue_t __wait; \ @@ -884,27 +909,12 @@ for (task = next_thread(current) ; task != current ; task = next_thread(task)) #define next_thread(p) \ - list_entry((p)->thread_group.next, struct task_struct, thread_group) + list_entry((p)->thread_group.next, task_t, thread_group) #define thread_group_leader(p) (p->pid == p->tgid) -static inline void del_from_runqueue(struct task_struct * p) -{ - nr_running--; - p->sleep_time = jiffies; - list_del(&p->run_list); - p->run_list.next = NULL; -} - -static inline int task_on_runqueue(struct task_struct *p) +static inline void unhash_process(task_t *p) { - return (p->run_list.next != NULL); -} - -static inline void unhash_process(struct task_struct *p) -{ - if (task_on_runqueue(p)) - out_of_line_bug(); write_lock_irq(&tasklist_lock); nr_threads--; unhash_pid(p); @@ -914,12 +924,12 @@ } /* 
Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */ -static inline void task_lock(struct task_struct *p) +static inline void task_lock(task_t *p) { spin_lock(&p->alloc_lock); } -static inline void task_unlock(struct task_struct *p) +static inline void task_unlock(task_t *p) { spin_unlock(&p->alloc_lock); } @@ -943,9 +953,29 @@ return res; } +static inline void set_need_resched(void) +{ + current->need_resched = 1; +} + +static inline void clear_need_resched(void) +{ + current->need_resched = 0; +} + +static inline void set_tsk_need_resched(struct task_struct *tsk) +{ + tsk->need_resched = 1; +} + +static inline void clear_tsk_need_resched(struct task_struct *tsk) +{ + tsk->need_resched = 0; +} + static inline int need_resched(void) { - return (unlikely(current->need_resched)); + return unlikely(current->need_resched); } extern void __cond_resched(void); @@ -955,5 +985,34 @@ __cond_resched(); } +/* + * Wrappers for p->cpu access. No-op on UP. + */ +#ifdef CONFIG_SMP + +static inline unsigned int task_cpu(struct task_struct *p) +{ + return p->cpu; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ + p->cpu = cpu; +} + +#else + +static inline unsigned int task_cpu(struct task_struct *p) +{ + return 0; +} + +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ +} + +#endif /* CONFIG_SMP */ + #endif /* __KERNEL__ */ + #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/serialP.h linux.21pre4-ac2/include/linux/serialP.h --- linux.21pre4/include/linux/serialP.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/serialP.h 2003-02-04 15:54:22.000000000 +0000 @@ -83,6 +83,7 @@ long pgrp; /* pgrp of opening process */ struct circ_buf xmit; spinlock_t xmit_lock; + spinlock_t irq_spinlock; u8 *iomem_base; u16 iomem_reg_shift; int io_type; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/skbuff.h linux.21pre4-ac2/include/linux/skbuff.h --- linux.21pre4/include/linux/skbuff.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/skbuff.h 2003-02-04 15:54:22.000000000 +0000 @@ -240,6 +240,7 @@ int newheadroom, int newtailroom, int priority); +extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) kfree_skb(a) extern void skb_over_panic(struct sk_buff *skb, int len, void *here); extern void skb_under_panic(struct sk_buff *skb, int len, void *here); @@ -1082,6 +1083,26 @@ } /** + * skb_padto - pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Returns the buffer, which may be a replacement + * for the original, or NULL for out of memory - in which case + * the original buffer is still freed. 
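/*
 * Illustrative sketch, not part of the patch: typical use of the
 * skb_padto() helper documented above in a driver's hard_start_xmit(),
 * padding runt frames up to the minimum Ethernet frame length.  The
 * driver function is hypothetical; ETH_ZLEN comes from <linux/if_ether.h>.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* May hand back a freshly allocated, zero-padded copy; on failure
	 * the original skb has already been freed for us. */
	skb = skb_padto(skb, ETH_ZLEN);
	if (skb == NULL)
		return 0;	/* treat as sent: nothing left to free */

	/* ... queue skb to the hardware owned by 'dev' here ... */
	dev_kfree_skb(skb);	/* stands in for the real TX path */
	return 0;
}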
+ */ + +static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len + skb->data_len; + if(likely(size >= len)) + return skb; + return skb_pad(skb, len-size); +} + +/** * skb_linearize - convert paged skb to linear one * @skb: buffer to linarize * @gfp: allocation mode diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/smp_balance.h linux.21pre4-ac2/include/linux/smp_balance.h --- linux.21pre4/include/linux/smp_balance.h 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/include/linux/smp_balance.h 2003-01-06 19:37:22.000000000 +0000 @@ -0,0 +1,10 @@ +#ifndef _LINUX_SMP_BALANCE_H +#define _LINUX_SMP_BALANCE_H + +#ifdef ARCH_HAS_SMP_BALANCE +#include +#else +#define arch_load_balance(x,y) 0 +#endif + +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/smp.h linux.21pre4-ac2/include/linux/smp.h --- linux.21pre4/include/linux/smp.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/smp.h 2003-02-04 15:54:22.000000000 +0000 @@ -84,8 +84,16 @@ #define kernel_lock() #define cpu_logical_map(cpu) 0 #define cpu_number_map(cpu) 0 +#define cpu_online(cpu) ({ BUG_ON((cpu) != 0); 1; }) #define smp_call_function(func,info,retry,wait) ({ 0; }) #define cpu_online_map 1 - +static inline void smp_send_reschedule(int cpu) { } +static inline void smp_send_reschedule_all(void) { } #endif + +/* + * Common definitions: + */ +#define cpu() smp_processor_id() + #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/sysctl.h linux.21pre4-ac2/include/linux/sysctl.h --- linux.21pre4/include/linux/sysctl.h 2003-01-29 17:07:58.000000000 +0000 +++ linux.21pre4-ac2/include/linux/sysctl.h 2003-02-04 15:54:23.000000000 +0000 @@ -546,7 +546,7 @@ FS_STATINODE=2, FS_MAXINODE=3, /* int:maximum number of inodes that can be allocated */ FS_NRDQUOT=4, /* int:current number of allocated dquots */ - FS_MAXDQUOT=5, /* int:maximum number of dquots that can be allocated */ + /* was FS_MAXDQUOT */ FS_NRFILE=6, /* int:current number of allocated filedescriptors */ FS_MAXFILE=7, /* int:maximum number of filedescriptors that can be allocated */ FS_DENTRY=8, @@ -557,6 +557,19 @@ FS_LEASES=13, /* int: leases enabled */ FS_DIR_NOTIFY=14, /* int: directory notification enabled */ FS_LEASE_TIME=15, /* int: maximum time to wait for a lease break */ + FS_DQSTATS=16, /* dir: disc quota usage statistics */ +}; + +/* /proc/sys/fs/quota/ */ +enum { + FS_DQ_LOOKUPS = 1, + FS_DQ_DROPS = 2, + FS_DQ_READS = 3, + FS_DQ_WRITES = 4, + FS_DQ_CACHE_HITS = 5, + FS_DQ_ALLOCATED = 6, + FS_DQ_FREE = 7, + FS_DQ_SYNCS = 8, }; /* CTL_DEBUG names: */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/usbdevice_fs.h linux.21pre4-ac2/include/linux/usbdevice_fs.h --- linux.21pre4/include/linux/usbdevice_fs.h 2003-01-29 16:26:34.000000000 +0000 +++ linux.21pre4-ac2/include/linux/usbdevice_fs.h 2003-02-04 15:54:23.000000000 +0000 @@ -142,6 +142,8 @@ #define USBDEVFS_HUB_PORTINFO _IOR('U', 19, struct usbdevfs_hub_portinfo) #define USBDEVFS_RESET _IO('U', 20) #define USBDEVFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define USBDEVFS_DISCONNECT _IO('U', 22) +#define USBDEVFS_CONNECT _IO('U', 23) /* --------------------------------------------------------------------- */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/usb.h linux.21pre4-ac2/include/linux/usb.h --- 
linux.21pre4/include/linux/usb.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/usb.h 2003-02-04 15:54:23.000000000 +0000 @@ -865,6 +865,7 @@ struct usb_device *children[USB_MAXCHILDREN]; }; +extern int usb_ifnum_to_ifpos(struct usb_device *dev, unsigned ifnum); extern struct usb_interface *usb_ifnum_to_if(struct usb_device *dev, unsigned ifnum); extern struct usb_endpoint_descriptor *usb_epnum_to_ep_desc(struct usb_device *dev, unsigned epnum); @@ -873,6 +874,7 @@ extern void usb_scan_devices(void); /* used these for multi-interface device registration */ +extern int usb_find_interface_driver_for_ifnum(struct usb_device *dev, unsigned ifnum); extern void usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void* priv); extern int usb_interface_claimed(struct usb_interface *iface); extern void usb_driver_release_interface(struct usb_driver *driver, struct usb_interface *iface); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/vmalloc.h linux.21pre4-ac2/include/linux/vmalloc.h --- linux.21pre4/include/linux/vmalloc.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/vmalloc.h 2003-02-04 15:54:22.000000000 +0000 @@ -26,6 +26,7 @@ extern void vmfree_area_pages(unsigned long address, unsigned long size); extern int vmalloc_area_pages(unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot); +extern void *vcalloc(unsigned long nmemb, unsigned long elem_size); /* * Allocate any pages diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/include/linux/wait.h linux.21pre4-ac2/include/linux/wait.h --- linux.21pre4/include/linux/wait.h 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/include/linux/wait.h 2003-02-04 15:54:22.000000000 +0000 @@ -58,6 +58,7 @@ # define wq_read_unlock read_unlock # define wq_write_lock_irq write_lock_irq # define wq_write_lock_irqsave write_lock_irqsave +# define wq_write_unlock_irq write_unlock_irq # define wq_write_unlock_irqrestore write_unlock_irqrestore # define wq_write_unlock write_unlock #else @@ -70,6 +71,7 @@ # define wq_read_unlock_irqrestore spin_unlock_irqrestore # define wq_write_lock_irq spin_lock_irq # define wq_write_lock_irqsave spin_lock_irqsave +# define wq_write_unlock_irq spin_unlock_irq # define wq_write_unlock_irqrestore spin_unlock_irqrestore # define wq_write_unlock spin_unlock #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/init/do_mounts.c linux.21pre4-ac2/init/do_mounts.c --- linux.21pre4/init/do_mounts.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/init/do_mounts.c 2003-02-04 14:56:12.000000000 +0000 @@ -164,7 +164,6 @@ { "dasdg", (DASD_MAJOR << MINORBITS) + (6 << 2) }, { "dasdh", (DASD_MAJOR << MINORBITS) + (7 << 2) }, #endif -#if defined(CONFIG_BLK_CPQ_DA) || defined(CONFIG_BLK_CPQ_DA_MODULE) { "ida/c0d0p",0x4800 }, { "ida/c0d1p",0x4810 }, { "ida/c0d2p",0x4820 }, @@ -188,8 +187,6 @@ { "ida/c5d0p",0x4D00 }, { "ida/c6d0p",0x4E00 }, { "ida/c7d0p",0x4F00 }, -#endif -#if defined(CONFIG_BLK_CPQ_CISS_DA) || defined(CONFIG_BLK_CPQ_CISS_DA_MODULE) { "cciss/c0d0p",0x6800 }, { "cciss/c0d1p",0x6810 }, { "cciss/c0d2p",0x6820 }, @@ -213,7 +210,6 @@ { "cciss/c5d0p",0x6D00 }, { "cciss/c6d0p",0x6E00 }, { "cciss/c7d0p",0x6F00 }, -#endif { "ataraid/d0p",0x7200 }, { "ataraid/d1p",0x7210 }, { "ataraid/d2p",0x7220 }, @@ -230,6 +226,24 @@ { "ataraid/d13p",0x72D0 }, { "ataraid/d14p",0x72E0 }, { "ataraid/d15p",0x72F0 }, + { "rd/c0d0p",0x3000 }, + { 
"rd/c0d0p1",0x3001 }, + { "rd/c0d0p2",0x3002 }, + { "rd/c0d0p3",0x3003 }, + { "rd/c0d0p4",0x3004 }, + { "rd/c0d0p5",0x3005 }, + { "rd/c0d0p6",0x3006 }, + { "rd/c0d0p7",0x3007 }, + { "rd/c0d0p8",0x3008 }, + { "rd/c0d1p",0x3008 }, + { "rd/c0d1p1",0x3009 }, + { "rd/c0d1p2",0x300a }, + { "rd/c0d1p3",0x300b }, + { "rd/c0d1p4",0x300c }, + { "rd/c0d1p5",0x300d }, + { "rd/c0d1p6",0x300e }, + { "rd/c0d1p7",0x300f }, + { "rd/c0d1p8",0x3010 }, { "nftla", 0x5d00 }, { "nftlb", 0x5d10 }, { "nftlc", 0x5d20 }, @@ -239,6 +253,7 @@ { "ftlc", 0x2c10 }, { "ftld", 0x2c18 }, { "mtdblock", 0x1f00 }, + { "nb", 0x2b00 }, { NULL, 0 } }; @@ -891,6 +906,8 @@ mount_devfs_fs (); } +#ifdef CONFIG_BLK_DEV_RAM + #if defined(BUILD_CRAMDISK) && defined(CONFIG_BLK_DEV_RAM) /* @@ -1037,3 +1054,4 @@ } #endif /* BUILD_CRAMDISK && CONFIG_BLK_DEV_RAM */ +#endif /* CONFIG_BLK_DEV_RAM */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/init/main.c linux.21pre4-ac2/init/main.c --- linux.21pre4/init/main.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/init/main.c 2003-01-06 19:25:48.000000000 +0000 @@ -60,6 +60,10 @@ #include #endif +#ifdef CONFIG_PNPBIOS +#include +#endif + #ifdef CONFIG_IRDA extern int irda_proto_init(void); extern int irda_device_init(void); @@ -106,6 +110,10 @@ #if defined(CONFIG_SYSVIPC) extern void ipc_init(void); #endif + +#ifdef CONFIG_PARISC +extern void parisc_init(void); +#endif /* * Boot command-line arguments @@ -288,8 +296,6 @@ extern void setup_arch(char **); extern void cpu_idle(void); -unsigned long wait_init_idle; - #ifndef CONFIG_SMP #ifdef CONFIG_X86_LOCAL_APIC @@ -298,34 +304,24 @@ APIC_init_uniprocessor(); } #else -#define smp_init() do { } while (0) +#define smp_init() do { } while (0) #endif #else - /* Called by boot processor to activate the rest. */ static void __init smp_init(void) { /* Get other processors into their bootup holding patterns. */ smp_boot_cpus(); - wait_init_idle = cpu_online_map; - clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */ smp_threads_ready=1; smp_commence(); - - /* Wait for the other cpus to set up their idle processes */ - printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle); - while (wait_init_idle) { - cpu_relax(); - barrier(); - } - printk("All processors have done init_idle\n"); } #endif + /* * We need to finalize in a non-__init function or else race conditions * between the root thread and the init thread may cause start_kernel to @@ -337,9 +333,8 @@ { kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL); unlock_kernel(); - current->need_resched = 1; - cpu_idle(); -} + cpu_idle(); +} /* * Activate the first processor. @@ -420,18 +415,25 @@ #ifdef CONFIG_PROC_FS proc_root_init(); #endif +#ifdef CONFIG_PARISC + parisc_init(); +#endif #if defined(CONFIG_SYSVIPC) ipc_init(); #endif check_bugs(); + printk("POSIX conformance testing by UNIFIX\n"); - /* - * We count on the initial thread going ok - * Like idlers init is an unlocked kernel thread, which will - * make syscalls (and thus be locked). + init_idle(current, smp_processor_id()); + /* + * We count on the initial thread going ok + * Like idlers init is an unlocked kernel thread, which will + * make syscalls (and thus be locked). 
*/ smp_init(); + + /* Do the rest non-__init'ed, we're now alive */ rest_init(); } @@ -460,6 +462,10 @@ */ static void __init do_basic_setup(void) { + /* Start the per-CPU migration threads */ +#if CONFIG_SMP + migration_init(); +#endif /* * Tell the world that we're going to be the grim @@ -519,6 +525,9 @@ #ifdef CONFIG_ISAPNP isapnp_init(); #endif +#ifdef CONFIG_PNPBIOS + pnpbios_init(); +#endif #ifdef CONFIG_TC tc_init(); #endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/ipc/msg.c linux.21pre4-ac2/ipc/msg.c --- linux.21pre4/ipc/msg.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/ipc/msg.c 2003-01-10 16:21:40.000000000 +0000 @@ -815,10 +815,12 @@ schedule(); current->state = TASK_RUNNING; + /* This introduces a race so we must always take + the slow path msg = (struct msg_msg*) msr_d.r_msg; if(!IS_ERR(msg)) goto out_success; - + */ t = msg_lock(msqid); if(t==NULL) msqid=-1; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/ipc/shm.c linux.21pre4-ac2/ipc/shm.c --- linux.21pre4/ipc/shm.c 2003-01-29 16:26:40.000000000 +0000 +++ linux.21pre4-ac2/ipc/shm.c 2003-01-06 15:38:15.000000000 +0000 @@ -680,7 +680,7 @@ shmdnext = shmd->vm_next; if (shmd->vm_ops == &shm_vm_ops && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) { - do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start); + do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start, 1); retval = 0; } } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/acct.c linux.21pre4-ac2/kernel/acct.c --- linux.21pre4/kernel/acct.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/acct.c 2003-01-06 17:34:02.000000000 +0000 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -299,8 +300,10 @@ ac.ac_etime = encode_comp_t(jiffies - current->start_time); ac.ac_utime = encode_comp_t(current->times.tms_utime); ac.ac_stime = encode_comp_t(current->times.tms_stime); - ac.ac_uid = current->uid; - ac.ac_gid = current->gid; + ac.ac_uid = fs_high2lowuid(current->uid); + ac.ac_gid = fs_high2lowgid(current->gid); + ac.ac_uid32 = current->uid; + ac.ac_gid32 = current->gid; ac.ac_tty = (current->tty) ? kdev_t_to_nr(current->tty->device) : 0; ac.ac_flag = 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/capability.c linux.21pre4-ac2/kernel/capability.c --- linux.21pre4/kernel/capability.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/capability.c 2003-01-06 19:49:16.000000000 +0000 @@ -8,6 +8,8 @@ #include #include +unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ + kernel_cap_t cap_bset = CAP_INIT_EFF_SET; /* Note: never hold tasklist_lock while spinning for this one */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/cpufreq.c linux.21pre4-ac2/kernel/cpufreq.c --- linux.21pre4/kernel/cpufreq.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/kernel/cpufreq.c 2003-01-10 16:27:32.000000000 +0000 @@ -0,0 +1,1253 @@ +/* + * linux/kernel/cpufreq.c + * + * Copyright (C) 2001 Russell King + * (C) 2002 Dominik Brodowski + * + * $Id: cpufreq.c,v 1.55 2003/01/10 08:39:18 db Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_CPU_FREQ_24_API +#include +#endif + + +/** + * The "cpufreq driver" - the arch- or hardware-dependend low + * level driver of CPUFreq support, and its locking mutex. + * cpu_max_freq is in kHz. + */ +static struct cpufreq_driver *cpufreq_driver; +static DECLARE_MUTEX (cpufreq_driver_sem); + + +/** + * Two notifier lists: the "policy" list is involved in the + * validation process for a new CPU frequency policy; the + * "transition" list for kernel code that needs to handle + * changes to devices when the CPU clock speed changes. + * The mutex locks both lists. If both cpufreq_driver_sem + * and cpufreq_notifier_sem need to be hold, get cpufreq_driver_sem + * first. + */ +static struct notifier_block *cpufreq_policy_notifier_list; +static struct notifier_block *cpufreq_transition_notifier_list; +static DECLARE_MUTEX (cpufreq_notifier_sem); + + +/** + * The cpufreq default policy. Can be set by a "cpufreq=..." command + * line option. + */ +static struct cpufreq_policy default_policy = { + .cpu = CPUFREQ_ALL_CPUS, + .min = 0, + .max = 0, + .policy = 0, +}; + + +#ifdef CONFIG_CPU_FREQ_24_API +/** + * A few values needed by the 2.4.-compatible API + */ +static unsigned int cpu_max_freq[NR_CPUS]; +static unsigned int cpu_min_freq[NR_CPUS]; +static unsigned int cpu_cur_freq[NR_CPUS]; +#endif + + + +/********************************************************************* + * 2.6. API * + *********************************************************************/ + +/** + * cpufreq_parse_policy - parse a policy string + * @input_string: the string to parse. + * @policy: the policy written inside input_string + * + * This function parses a "policy string" - something the user echo'es into + * /proc/cpufreq or gives as boot parameter - into a struct cpufreq_policy. + * If there are invalid/missing entries, they are replaced with current + * cpufreq policy. 
+ */ +static int cpufreq_parse_policy(char input_string[42], struct cpufreq_policy *policy) +{ + unsigned int min = 0; + unsigned int max = 0; + unsigned int cpu = 0; + char policy_string[42] = {'\0'}; + struct cpufreq_policy current_policy; + unsigned int result = -EFAULT; + unsigned int i = 0; + + if (!policy) + return -EINVAL; + + policy->min = 0; + policy->max = 0; + policy->policy = 0; + policy->cpu = CPUFREQ_ALL_CPUS; + + if (sscanf(input_string, "%d:%d:%d:%s", &cpu, &min, &max, policy_string) == 4) + { + policy->min = min; + policy->max = max; + policy->cpu = cpu; + result = 0; + goto scan_policy; + } + if (sscanf(input_string, "%d%%%d%%%d%%%s", &cpu, &min, &max, policy_string) == 4) + { + if (!cpufreq_get_policy(¤t_policy, cpu)) { + policy->min = (min * current_policy.cpuinfo.max_freq) / 100; + policy->max = (max * current_policy.cpuinfo.max_freq) / 100; + policy->cpu = cpu; + result = 0; + goto scan_policy; + } + } + + if (sscanf(input_string, "%d:%d:%s", &min, &max, policy_string) == 3) + { + policy->min = min; + policy->max = max; + result = 0; + goto scan_policy; + } + + if (sscanf(input_string, "%d%%%d%%%s", &min, &max, policy_string) == 3) + { + if (!cpufreq_get_policy(¤t_policy, cpu)) { + policy->min = (min * current_policy.cpuinfo.max_freq) / 100; + policy->max = (max * current_policy.cpuinfo.max_freq) / 100; + result = 0; + goto scan_policy; + } + } + + return -EINVAL; + +scan_policy: + + for (i=0;ipolicy = CPUFREQ_POLICY_POWERSAVE; + } + else if (!strncmp(policy_string, "performance",6) || + !strncmp(policy_string, "high",4) || + !strncmp(policy_string, "full",4)) + { + result = 0; + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + } + else if (!cpufreq_get_policy(¤t_policy, policy->cpu)) + { + policy->policy = current_policy.policy; + } + else + { + policy->policy = 0; + } + + return result; +} + + +/* + * cpufreq command line parameter. Must be hard values (kHz) + * cpufreq=1000000:2000000:PERFORMANCE + * to set the default CPUFreq policy. + */ +static int __init cpufreq_setup(char *str) +{ + cpufreq_parse_policy(str, &default_policy); + default_policy.cpu = CPUFREQ_ALL_CPUS; + return 1; +} +__setup("cpufreq=", cpufreq_setup); + + +#ifdef CONFIG_PROC_FS + +/** + * cpufreq_proc_read - read /proc/cpufreq + * + * This function prints out the current cpufreq policy. + */ +static int cpufreq_proc_read ( + char *page, + char **start, + off_t off, + int count, + int *eof, + void *data) +{ + char *p = page; + int len = 0; + struct cpufreq_policy policy; + unsigned int min_pctg = 0; + unsigned int max_pctg = 0; + unsigned int i = 0; + + if (off != 0) + goto end; + + p += sprintf(p, " minimum CPU frequency - maximum CPU frequency - policy\n"); + for (i=0;icount) + len = count; + if (len<0) + len = 0; + + return len; +} + + +/** + * cpufreq_proc_write - handles writing into /proc/cpufreq + * + * This function calls the parsing script and then sets the policy + * accordingly. 
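/*
 * Illustrative sketch, not part of the patch: feeding the policy parser
 * above from user space.  Frequencies are in kHz; "cpu:min:max:policy"
 * targets a single CPU, "min%max%policy" gives percentages of the maximum
 * frequency, and the same syntax works for the cpufreq= boot parameter.
 * The wrapper function and its name are arbitrary.
 */
#include <stdio.h>

int set_cpufreq_policy(unsigned int min_khz, unsigned int max_khz,
		       const char *policy)	/* e.g. "performance" */
{
	FILE *f = fopen("/proc/cpufreq", "w");
	int ret = 0;

	if (!f)
		return -1;
	if (fprintf(f, "%u:%u:%s", min_khz, max_khz, policy) < 0)
		ret = -1;
	if (fclose(f) != 0)
		ret = -1;
	return ret;
}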
+ */ +static int cpufreq_proc_write ( + struct file *file, + const char *buffer, + unsigned long count, + void *data) +{ + int result = 0; + char proc_string[42] = {'\0'}; + struct cpufreq_policy policy; + unsigned int i = 0; + + + if ((count > sizeof(proc_string) - 1)) + return -EINVAL; + + if (copy_from_user(proc_string, buffer, count)) + return -EFAULT; + + proc_string[count] = '\0'; + + result = cpufreq_parse_policy(proc_string, &policy); + if (result) + return -EFAULT; + + if (policy.cpu == CPUFREQ_ALL_CPUS) + { + for (i=0; iread_proc = cpufreq_proc_read; + entry->write_proc = cpufreq_proc_write; + } + + return 0; +} + + +/** + * cpufreq_proc_exit - removes "cpufreq" from the /proc root directory. + * + * This function removes "cpufreq" from the /proc root directory. + */ +static void cpufreq_proc_exit (void) +{ + remove_proc_entry("cpufreq", &proc_root); + return; +} +#endif /* CONFIG_PROC_FS */ + + + +/********************************************************************* + * 2.4. COMPATIBLE API * + *********************************************************************/ + +#ifdef CONFIG_CPU_FREQ_24_API +/** + * cpufreq_set - set the CPU frequency + * @freq: target frequency in kHz + * @cpu: CPU for which the frequency is to be set + * + * Sets the CPU frequency to freq. + */ +int cpufreq_set(unsigned int freq, unsigned int cpu) +{ + struct cpufreq_policy policy; + down(&cpufreq_driver_sem); + if (!cpufreq_driver || !freq || (cpu > NR_CPUS)) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + policy.min = freq; + policy.max = freq; + policy.policy = CPUFREQ_POLICY_POWERSAVE; + policy.cpu = cpu; + + up(&cpufreq_driver_sem); + + if (policy.cpu == CPUFREQ_ALL_CPUS) + { + unsigned int i; + unsigned int ret = 0; + for (i=0; iextra1; + int len, left = *lenp; + + if (!left || (filp->f_pos && !write) || !cpu_online(cpu)) { + *lenp = 0; + return 0; + } + + if (write) { + unsigned int freq; + + len = left; + if (left > sizeof(buf)) + left = sizeof(buf); + if (copy_from_user(buf, buffer, left)) + return -EFAULT; + buf[sizeof(buf) - 1] = '\0'; + + freq = simple_strtoul(buf, &p, 0); + cpufreq_set(freq, cpu); + } else { + len = sprintf(buf, "%d\n", cpufreq_get(cpu)); + if (len > left) + len = left; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + } + + *lenp = len; + filp->f_pos += len; + return 0; +} + +static int +cpufreq_sysctl(ctl_table *table, int *name, int nlen, + void *oldval, size_t *oldlenp, + void *newval, size_t newlen, void **context) +{ + int cpu = (int) table->extra1; + + if (!cpu_online(cpu)) + return -EINVAL; + + if (oldval && oldlenp) { + size_t oldlen; + + if (get_user(oldlen, oldlenp)) + return -EFAULT; + + if (oldlen != sizeof(unsigned int)) + return -EINVAL; + + if (put_user(cpufreq_get(cpu), (unsigned int *)oldval) || + put_user(sizeof(unsigned int), oldlenp)) + return -EFAULT; + } + if (newval && newlen) { + unsigned int freq; + + if (newlen != sizeof(unsigned int)) + return -EINVAL; + + if (get_user(freq, (unsigned int *)newval)) + return -EFAULT; + + cpufreq_set(freq, cpu); + } + return 1; +} + +/* ctl_table ctl_cpu_vars_{0,1,...,(NR_CPUS-1)} */ +/* due to NR_CPUS tweaking, a lot of if/endifs are required, sorry */ + CTL_TABLE_CPU_VARS(0); +#if NR_CPUS > 1 + CTL_TABLE_CPU_VARS(1); +#endif +#if NR_CPUS > 2 + CTL_TABLE_CPU_VARS(2); +#endif +#if NR_CPUS > 3 + CTL_TABLE_CPU_VARS(3); +#endif +#if NR_CPUS > 4 + CTL_TABLE_CPU_VARS(4); +#endif +#if NR_CPUS > 5 + CTL_TABLE_CPU_VARS(5); +#endif +#if NR_CPUS > 6 + CTL_TABLE_CPU_VARS(6); +#endif +#if NR_CPUS > 7 + 
CTL_TABLE_CPU_VARS(7); +#endif +#if NR_CPUS > 8 + CTL_TABLE_CPU_VARS(8); +#endif +#if NR_CPUS > 9 + CTL_TABLE_CPU_VARS(9); +#endif +#if NR_CPUS > 10 + CTL_TABLE_CPU_VARS(10); +#endif +#if NR_CPUS > 11 + CTL_TABLE_CPU_VARS(11); +#endif +#if NR_CPUS > 12 + CTL_TABLE_CPU_VARS(12); +#endif +#if NR_CPUS > 13 + CTL_TABLE_CPU_VARS(13); +#endif +#if NR_CPUS > 14 + CTL_TABLE_CPU_VARS(14); +#endif +#if NR_CPUS > 15 + CTL_TABLE_CPU_VARS(15); +#endif +#if NR_CPUS > 16 + CTL_TABLE_CPU_VARS(16); +#endif +#if NR_CPUS > 17 + CTL_TABLE_CPU_VARS(17); +#endif +#if NR_CPUS > 18 + CTL_TABLE_CPU_VARS(18); +#endif +#if NR_CPUS > 19 + CTL_TABLE_CPU_VARS(19); +#endif +#if NR_CPUS > 20 + CTL_TABLE_CPU_VARS(20); +#endif +#if NR_CPUS > 21 + CTL_TABLE_CPU_VARS(21); +#endif +#if NR_CPUS > 22 + CTL_TABLE_CPU_VARS(22); +#endif +#if NR_CPUS > 23 + CTL_TABLE_CPU_VARS(23); +#endif +#if NR_CPUS > 24 + CTL_TABLE_CPU_VARS(24); +#endif +#if NR_CPUS > 25 + CTL_TABLE_CPU_VARS(25); +#endif +#if NR_CPUS > 26 + CTL_TABLE_CPU_VARS(26); +#endif +#if NR_CPUS > 27 + CTL_TABLE_CPU_VARS(27); +#endif +#if NR_CPUS > 28 + CTL_TABLE_CPU_VARS(28); +#endif +#if NR_CPUS > 29 + CTL_TABLE_CPU_VARS(29); +#endif +#if NR_CPUS > 30 + CTL_TABLE_CPU_VARS(30); +#endif +#if NR_CPUS > 31 + CTL_TABLE_CPU_VARS(31); +#endif +#if NR_CPUS > 32 +#error please extend CPU enumeration +#endif + +/* due to NR_CPUS tweaking, a lot of if/endifs are required, sorry */ +static ctl_table ctl_cpu_table[NR_CPUS + 1] = { + CPU_ENUM(0), +#if NR_CPUS > 1 + CPU_ENUM(1), +#endif +#if NR_CPUS > 2 + CPU_ENUM(2), +#endif +#if NR_CPUS > 3 + CPU_ENUM(3), +#endif +#if NR_CPUS > 4 + CPU_ENUM(4), +#endif +#if NR_CPUS > 5 + CPU_ENUM(5), +#endif +#if NR_CPUS > 6 + CPU_ENUM(6), +#endif +#if NR_CPUS > 7 + CPU_ENUM(7), +#endif +#if NR_CPUS > 8 + CPU_ENUM(8), +#endif +#if NR_CPUS > 9 + CPU_ENUM(9), +#endif +#if NR_CPUS > 10 + CPU_ENUM(10), +#endif +#if NR_CPUS > 11 + CPU_ENUM(11), +#endif +#if NR_CPUS > 12 + CPU_ENUM(12), +#endif +#if NR_CPUS > 13 + CPU_ENUM(13), +#endif +#if NR_CPUS > 14 + CPU_ENUM(14), +#endif +#if NR_CPUS > 15 + CPU_ENUM(15), +#endif +#if NR_CPUS > 16 + CPU_ENUM(16), +#endif +#if NR_CPUS > 17 + CPU_ENUM(17), +#endif +#if NR_CPUS > 18 + CPU_ENUM(18), +#endif +#if NR_CPUS > 19 + CPU_ENUM(19), +#endif +#if NR_CPUS > 20 + CPU_ENUM(20), +#endif +#if NR_CPUS > 21 + CPU_ENUM(21), +#endif +#if NR_CPUS > 22 + CPU_ENUM(22), +#endif +#if NR_CPUS > 23 + CPU_ENUM(23), +#endif +#if NR_CPUS > 24 + CPU_ENUM(24), +#endif +#if NR_CPUS > 25 + CPU_ENUM(25), +#endif +#if NR_CPUS > 26 + CPU_ENUM(26), +#endif +#if NR_CPUS > 27 + CPU_ENUM(27), +#endif +#if NR_CPUS > 28 + CPU_ENUM(28), +#endif +#if NR_CPUS > 29 + CPU_ENUM(29), +#endif +#if NR_CPUS > 30 + CPU_ENUM(30), +#endif +#if NR_CPUS > 31 + CPU_ENUM(31), +#endif +#if NR_CPUS > 32 +#error please extend CPU enumeration +#endif + { + .ctl_name = 0, + } +}; + +static ctl_table ctl_cpu[2] = { + { + .ctl_name = CTL_CPU, + .procname = "cpu", + .mode = 0555, + .child = ctl_cpu_table, + }, + { + .ctl_name = 0, + } +}; + +struct ctl_table_header *cpufreq_sysctl_table; + +static inline void cpufreq_sysctl_init(void) +{ + cpufreq_sysctl_table = register_sysctl_table(ctl_cpu, 0); +} + +static inline void cpufreq_sysctl_exit(void) +{ + unregister_sysctl_table(cpufreq_sysctl_table); +} + +#else +#define cpufreq_sysctl_init() do {} while(0) +#define cpufreq_sysctl_exit() do {} while(0) +#endif /* CONFIG_SYSCTL */ +#endif /* CONFIG_CPU_FREQ_24_API */ + + + +/********************************************************************* + * NOTIFIER LISTS INTERFACE * 
+ *********************************************************************/ + +/** + * cpufreq_register_notifier - register a driver with cpufreq + * @nb: notifier function to register + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * + * Add a driver to one of two lists: either a list of drivers that + * are notified about clock rate changes (once before and once after + * the transition), or a list of drivers that are notified about + * changes in cpufreq policy. + * + * This function may sleep, and has the same return conditions as + * notifier_chain_register. + */ +int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) +{ + int ret; + + down(&cpufreq_notifier_sem); + switch (list) { + case CPUFREQ_TRANSITION_NOTIFIER: + ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb); + break; + case CPUFREQ_POLICY_NOTIFIER: + ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb); + break; + default: + ret = -EINVAL; + } + up(&cpufreq_notifier_sem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_register_notifier); + + +/** + * cpufreq_unregister_notifier - unregister a driver with cpufreq + * @nb: notifier block to be unregistered + * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER + * + * Remove a driver from the CPU frequency notifier list. + * + * This function may sleep, and has the same return conditions as + * notifier_chain_unregister. + */ +int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) +{ + int ret; + + down(&cpufreq_notifier_sem); + switch (list) { + case CPUFREQ_TRANSITION_NOTIFIER: + ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb); + break; + case CPUFREQ_POLICY_NOTIFIER: + ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb); + break; + default: + ret = -EINVAL; + } + up(&cpufreq_notifier_sem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_unregister_notifier); + + + +/********************************************************************* + * POLICY INTERFACE * + *********************************************************************/ + +/** + * cpufreq_get_policy - get the current cpufreq_policy + * @policy: struct cpufreq_policy into which the current cpufreq_policy is written + * + * Reads the current cpufreq policy. + */ +int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) +{ + down(&cpufreq_driver_sem); + if (!cpufreq_driver || !policy || + (cpu >= NR_CPUS) || (!cpu_online(cpu))) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + policy->min = cpufreq_driver->policy[cpu].min; + policy->max = cpufreq_driver->policy[cpu].max; + policy->policy = cpufreq_driver->policy[cpu].policy; + policy->cpuinfo.max_freq = cpufreq_driver->policy[cpu].cpuinfo.max_freq; + policy->cpuinfo.min_freq = cpufreq_driver->policy[cpu].cpuinfo.min_freq; + policy->cpuinfo.transition_latency = cpufreq_driver->policy[cpu].cpuinfo.transition_latency; + policy->cpu = cpu; + + up(&cpufreq_driver_sem); + + return 0; +} +EXPORT_SYMBOL(cpufreq_get_policy); + + +/** + * cpufreq_set_policy - set a new CPUFreq policy + * @policy: policy to be set. + * + * Sets a new CPU frequency and voltage scaling policy. 
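
cpufreq_register_notifier() above is what a driver uses when it must react to speed changes, for instance to reprogram a divider that is derived from the CPU clock. A minimal sketch of the client side; my_cpufreq_event() and my_device_adjust() are hypothetical names, while the event values and the struct cpufreq_freqs argument are the ones passed by cpufreq_notify_transition() further down:

        #include <linux/notifier.h>
        #include <linux/cpufreq.h>

        /* hypothetical helper that reprograms a clock divider */
        extern void my_device_adjust(unsigned int new_khz);

        static int my_cpufreq_event(struct notifier_block *nb,
                                    unsigned long event, void *data)
        {
                struct cpufreq_freqs *freqs = data;

                /* only act once the new frequency is in effect */
                if (event == CPUFREQ_POSTCHANGE)
                        my_device_adjust(freqs->new);
                return NOTIFY_OK;
        }

        static struct notifier_block my_cpufreq_nb = {
                .notifier_call = my_cpufreq_event,
        };

        /* in the driver's init path:
         *      cpufreq_register_notifier(&my_cpufreq_nb,
         *                                CPUFREQ_TRANSITION_NOTIFIER);
         */
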
+ */ +int cpufreq_set_policy(struct cpufreq_policy *policy) +{ + int ret; + + down(&cpufreq_driver_sem); + if (!cpufreq_driver || !cpufreq_driver->verify || + !cpufreq_driver->setpolicy || !policy || + (policy->cpu >= NR_CPUS) || (!cpu_online(policy->cpu))) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + policy->cpuinfo.max_freq = cpufreq_driver->policy[policy->cpu].cpuinfo.max_freq; + policy->cpuinfo.min_freq = cpufreq_driver->policy[policy->cpu].cpuinfo.min_freq; + policy->cpuinfo.transition_latency = cpufreq_driver->policy[policy->cpu].cpuinfo.transition_latency; + + /* verify the cpu speed can be set within this limit */ + ret = cpufreq_driver->verify(policy); + if (ret) { + up(&cpufreq_driver_sem); + return ret; + } + + down(&cpufreq_notifier_sem); + + /* adjust if neccessary - all reasons */ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, + policy); + + /* adjust if neccessary - hardware incompatibility*/ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE, + policy); + + /* verify the cpu speed can be set within this limit, + which might be different to the first one */ + ret = cpufreq_driver->verify(policy); + if (ret) { + up(&cpufreq_notifier_sem); + up(&cpufreq_driver_sem); + return ret; + } + + /* notification of the new policy */ + notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY, + policy); + + up(&cpufreq_notifier_sem); + + cpufreq_driver->policy[policy->cpu].min = policy->min; + cpufreq_driver->policy[policy->cpu].max = policy->max; + cpufreq_driver->policy[policy->cpu].policy = policy->policy; + +#ifdef CONFIG_CPU_FREQ_24_API + cpu_cur_freq[policy->cpu] = policy->max; +#endif + + ret = cpufreq_driver->setpolicy(policy); + + up(&cpufreq_driver_sem); + + return ret; +} +EXPORT_SYMBOL(cpufreq_set_policy); + + + +/********************************************************************* + * EXTERNALLY AFFECTING FREQUENCY CHANGES * + *********************************************************************/ + +/** + * adjust_jiffies - adjust the system "loops_per_jiffy" + * + * This function alters the system "loops_per_jiffy" for the clock + * speed change. Note that loops_per_jiffy cannot be updated on SMP + * systems as each CPU might be scaled differently. So, use the arch + * per-CPU loops_per_jiffy value wherever possible. + */ +#ifndef CONFIG_SMP +static unsigned long l_p_j_ref = 0; +static unsigned int l_p_j_ref_freq = 0; + +static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) +{ + if (!l_p_j_ref_freq) { + l_p_j_ref = loops_per_jiffy; + l_p_j_ref_freq = ci->old; + } + if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || + (val == CPUFREQ_POSTCHANGE && ci->old > ci->new)) + loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); +} +#else +#define adjust_jiffies(x...) do {} while (0) +#endif + + +/** + * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition + * + * This function calls the transition notifiers and the "adjust_jiffies" function. It is called + * twice on all CPU frequency changes that have external effects. 
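
adjust_jiffies() above keeps udelay() honest across a transition: loops_per_jiffy is raised before the CPU speeds up and lowered only after it has slowed down, and the new value is simply the reference calibration rescaled by the frequency ratio (cpufreq_scale() itself is a helper in the cpufreq headers, not shown in this hunk). A standalone sketch of that arithmetic with made-up numbers:

        #include <stdio.h>

        /* proportional rescaling of loops_per_jiffy, as adjust_jiffies()
         * does via cpufreq_scale(); 64-bit intermediate avoids overflow */
        static unsigned long scale_lpj(unsigned long ref_lpj,
                                       unsigned int ref_khz,
                                       unsigned int new_khz)
        {
                return (unsigned long)
                        ((unsigned long long)ref_lpj * new_khz / ref_khz);
        }

        int main(void)
        {
                /* calibration taken at 400000 kHz (made-up value) */
                printf("lpj at 300 MHz: %lu\n",
                       scale_lpj(1994752, 400000, 300000));    /* 1496064 */
                return 0;
        }
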
+ */ +void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) +{ + down(&cpufreq_notifier_sem); + switch (state) { + case CPUFREQ_PRECHANGE: + notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); + adjust_jiffies(CPUFREQ_PRECHANGE, freqs); + break; + case CPUFREQ_POSTCHANGE: + adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); + notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); +#ifdef CONFIG_CPU_FREQ_24_API + cpu_cur_freq[freqs->cpu] = freqs->new; +#endif + break; + } + up(&cpufreq_notifier_sem); +} +EXPORT_SYMBOL_GPL(cpufreq_notify_transition); + + + +/********************************************************************* + * REGISTER / UNREGISTER CPUFREQ DRIVER * + *********************************************************************/ + +/** + * cpufreq_register - register a CPU Frequency driver + * @driver_data: A struct cpufreq_driver containing the values submitted by the CPU Frequency driver. + * + * Registers a CPU Frequency driver to this core code. This code + * returns zero on success, -EBUSY when another driver got here first + * (and isn't unregistered in the meantime). + * + */ +int cpufreq_register(struct cpufreq_driver *driver_data) +{ + unsigned int ret; + unsigned int i; + struct cpufreq_policy policy; + + if (cpufreq_driver) + return -EBUSY; + + if (!driver_data || !driver_data->verify || + !driver_data->setpolicy) + return -EINVAL; + + down(&cpufreq_driver_sem); + cpufreq_driver = driver_data; + + /* check for a default policy - if it exists, use it on _all_ CPUs*/ + for (i=0; ipolicy[i].policy = default_policy.policy; + if (default_policy.min) + cpufreq_driver->policy[i].min = default_policy.min; + if (default_policy.max) + cpufreq_driver->policy[i].max = default_policy.max; + } + + /* set default policy on all CPUs. Must be called per-CPU and not + * with CPUFREQ_ALL_CPUs as there might be no common policy for all + * CPUs (UltraSPARC etc.) + */ + for (i=0; ipolicy[i].policy; + policy.min = cpufreq_driver->policy[i].min; + policy.max = cpufreq_driver->policy[i].max; + policy.cpu = i; + up(&cpufreq_driver_sem); + ret = cpufreq_set_policy(&policy); + down(&cpufreq_driver_sem); + if (ret) { + cpufreq_driver = NULL; + up(&cpufreq_driver_sem); + return ret; + } + } + + up(&cpufreq_driver_sem); + + cpufreq_proc_init(); + +#ifdef CONFIG_CPU_FREQ_24_API + down(&cpufreq_driver_sem); + for (i=0; ipolicy[i].cpuinfo.min_freq; + cpu_max_freq[i] = driver_data->policy[i].cpuinfo.max_freq; + cpu_cur_freq[i] = driver_data->cpu_cur_freq[i]; + } + up(&cpufreq_driver_sem); + + cpufreq_sysctl_init(); +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_register); + + +/** + * cpufreq_unregister - unregister the current CPUFreq driver + * + * Unregister the current CPUFreq driver. Only call this if you have + * the right to do so, i.e. if you have succeeded in initialising before! + * Returns zero if successful, and -EINVAL if the cpufreq_driver is + * currently not initialised. 
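
cpufreq_register() above expects the driver to provide ->verify(), ->setpolicy() and a per-CPU policy array whose cpuinfo limits are already filled in, because the registration loop immediately pushes a policy to every CPU. A rough sketch of the driver side; all my_* names are hypothetical, and treating ->policy as a driver-supplied pointer is an assumption read off the way the loop above dereferences it:

        #include <linux/sched.h>
        #include <linux/cpufreq.h>

        static struct cpufreq_policy my_policies[NR_CPUS];

        static int my_verify(struct cpufreq_policy *policy)
        {
                /* clamp the request to what the hardware can do (kHz) */
                cpufreq_verify_within_limits(policy, 300000, 600000);
                return 0;
        }

        static int my_setpolicy(struct cpufreq_policy *policy)
        {
                /* pick a speed inside policy->min..policy->max according
                 * to policy->policy and program the PLL here */
                return 0;
        }

        static struct cpufreq_driver my_driver = {
                .verify    = my_verify,
                .setpolicy = my_setpolicy,
                .policy    = my_policies,
        };

        /* my_policies[i] (cpuinfo limits plus an initial min/max/policy)
         * must be filled in before calling:
         *      cpufreq_register(&my_driver);
         */
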
+ */ +int cpufreq_unregister(void) +{ + down(&cpufreq_driver_sem); + + if (!cpufreq_driver) { + up(&cpufreq_driver_sem); + return -EINVAL; + } + + cpufreq_driver = NULL; + + up(&cpufreq_driver_sem); + + cpufreq_proc_exit(); + +#ifdef CONFIG_CPU_FREQ_24_API + cpufreq_sysctl_exit(); +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_unregister); + + +#ifdef CONFIG_PM +/** + * cpufreq_restore - restore the CPU clock frequency after resume + * + * Restore the CPU clock frequency so that our idea of the current + * frequency reflects the actual hardware. + */ +int cpufreq_restore(void) +{ + struct cpufreq_policy policy; + unsigned int i; + unsigned int ret = 0; + + if (in_interrupt()) + panic("cpufreq_restore() called from interrupt context!"); + + for (i=0;ipolicy[i].min; + policy.max = cpufreq_driver->policy[i].max; + policy.policy = cpufreq_driver->policy[i].policy; + policy.cpu = i; + up(&cpufreq_driver_sem); + + ret += cpufreq_set_policy(&policy); + } + + return ret; +} +EXPORT_SYMBOL_GPL(cpufreq_restore); +#else +#define cpufreq_restore() do {} while (0) +#endif /* CONFIG_PM */ + + +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ + +int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) +{ + unsigned int min_freq = ~0; + unsigned int max_freq = 0; + unsigned int i = 0; + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if (freq < min_freq) + min_freq = freq; + if (freq > max_freq) + max_freq = freq; + } + + policy->min = policy->cpuinfo.min_freq = min_freq; + policy->max = policy->cpuinfo.max_freq = max_freq; + + if (policy->min == ~0) + return -EINVAL; + else + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo); + + +int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table) +{ + unsigned int next_larger = ~0; + unsigned int i = 0; + unsigned int count = 0; + + if (!cpu_online(policy->cpu)) + return -EINVAL; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if ((freq >= policy->min) && (freq <= policy->max)) + count++; + else if ((next_larger > freq) && (freq > policy->max)) + next_larger = freq; + } + + if (!count) + policy->max = next_larger; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify); + + +int cpufreq_frequency_table_setpolicy(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + unsigned int *index) +{ + struct cpufreq_frequency_table optimal = { .index = ~0, }; + unsigned int i; + + switch (policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + optimal.frequency = 0; + break; + case CPUFREQ_POLICY_POWERSAVE: + optimal.frequency = ~0; + break; + } + + if (!cpu_online(policy->cpu)) + return -EINVAL; + + for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { + unsigned int freq = table[i].frequency; + if (freq == CPUFREQ_ENTRY_INVALID) + continue; + if ((freq < policy->min) || (freq > policy->max)) + continue; + switch(policy->policy) { + case CPUFREQ_POLICY_PERFORMANCE: + if (optimal.frequency <= 
freq) { + optimal.frequency = freq; + optimal.index = i; + } + break; + case CPUFREQ_POLICY_POWERSAVE: + if (optimal.frequency >= freq) { + optimal.frequency = freq; + optimal.index = i; + } + break; + } + } + if (optimal.index > i) + return -EINVAL; + + *index = optimal.index; + + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_table_setpolicy); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/exit.c linux.21pre4-ac2/kernel/exit.c --- linux.21pre4/kernel/exit.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/exit.c 2003-01-06 19:26:02.000000000 +0000 @@ -28,49 +28,22 @@ static void release_task(struct task_struct * p) { - if (p != current) { + if (p == current) + BUG(); #ifdef CONFIG_SMP - /* - * Wait to make sure the process isn't on the - * runqueue (active on some other CPU still) - */ - for (;;) { - task_lock(p); - if (!task_has_cpu(p)) - break; - task_unlock(p); - do { - cpu_relax(); - barrier(); - } while (task_has_cpu(p)); - } - task_unlock(p); + wait_task_inactive(p); #endif - atomic_dec(&p->user->processes); - free_uid(p->user); - unhash_process(p); - - release_thread(p); - current->cmin_flt += p->min_flt + p->cmin_flt; - current->cmaj_flt += p->maj_flt + p->cmaj_flt; - current->cnswap += p->nswap + p->cnswap; - /* - * Potentially available timeslices are retrieved - * here - this way the parent does not get penalized - * for creating too many processes. - * - * (this cannot be used to artificially 'generate' - * timeslices, because any timeslice recovered here - * was given away by the parent in the first place.) - */ - current->counter += p->counter; - if (current->counter >= MAX_COUNTER) - current->counter = MAX_COUNTER; - p->pid = 0; - free_task_struct(p); - } else { - printk("task releasing itself\n"); - } + atomic_dec(&p->user->processes); + free_uid(p->user); + unhash_process(p); + + release_thread(p); + current->cmin_flt += p->min_flt + p->cmin_flt; + current->cmaj_flt += p->maj_flt + p->cmaj_flt; + current->cnswap += p->nswap + p->cnswap; + sched_exit(p); + p->pid = 0; + free_task_struct(p); } /* @@ -150,6 +123,79 @@ return retval; } +/** + * reparent_to_init() - Reparent the calling kernel thread to the init task. + * + * If a kernel thread is launched as a result of a system call, or if + * it ever exits, it should generally reparent itself to init so that + * it is correctly cleaned up on exit. + * + * The various task state such as scheduling policy and priority may have + * been inherited from a user process, so we reset them to sane values here. + * + * NOTE that reparent_to_init() gives the caller full capabilities. + */ +void reparent_to_init(void) +{ + write_lock_irq(&tasklist_lock); + + /* Reparent to init */ + REMOVE_LINKS(current); + current->p_pptr = child_reaper; + current->p_opptr = child_reaper; + SET_LINKS(current); + + /* Set the exit signal to SIGCHLD so we signal init on exit */ + current->exit_signal = SIGCHLD; + + current->ptrace = 0; + if ((current->policy == SCHED_OTHER) && (task_nice(current) < 0)) + set_user_nice(current, 0); + /* cpus_allowed? */ + /* rt_priority? */ + /* signals? 
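
Returning to the cpufreq_frequency_table_* helpers that close the cpufreq.c hunk above: a driver with a fixed set of operating points can publish them in a table and let the helpers derive the cpuinfo limits, clamp incoming policies, and pick the entry that best matches the chosen policy. A sketch with hypothetical frequencies; the field names and end markers are the ones the helpers test:

        #include <linux/errno.h>
        #include <linux/cpufreq.h>

        /* hypothetical two-speed CPU; the middle entry shows how a slot
         * can be blanked out at runtime */
        static struct cpufreq_frequency_table my_table[] = {
                { .index = 0, .frequency = 300000 },
                { .index = 1, .frequency = CPUFREQ_ENTRY_INVALID },
                { .index = 2, .frequency = 600000 },
                { .index = 0, .frequency = CPUFREQ_TABLE_END },
        };

        static int my_table_verify(struct cpufreq_policy *policy)
        {
                /* snaps policy->max up to the next table entry if the
                 * requested range contains no valid frequency */
                return cpufreq_frequency_table_verify(policy, my_table);
        }

        static int my_table_setpolicy(struct cpufreq_policy *policy)
        {
                unsigned int idx;

                if (cpufreq_frequency_table_setpolicy(policy, my_table, &idx))
                        return -EINVAL;
                /* program the hardware to my_table[idx].frequency here */
                return 0;
        }

        /* at init time, derive cpuinfo.min_freq/max_freq from the table:
         *      cpufreq_frequency_table_cpuinfo(&my_policy, my_table);
         */
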
*/ + current->cap_effective = CAP_INIT_EFF_SET; + current->cap_inheritable = CAP_INIT_INH_SET; + current->cap_permitted = CAP_FULL_SET; + current->keep_capabilities = 0; + memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim))); + current->user = INIT_USER; + + write_unlock_irq(&tasklist_lock); +} + +/* + * Put all the gunge required to become a kernel thread without + * attached user resources in one place where it belongs. + */ + +void daemonize(void) +{ + struct fs_struct *fs; + + + /* + * If we were started as result of loading a module, close all of the + * user space pages. We don't need them, and if we didn't close them + * they would be locked into memory. + */ + exit_mm(current); + + current->session = 1; + current->pgrp = 1; + current->tty = NULL; + + /* Become as one with the init task */ + + exit_fs(current); /* current->fs->count--; */ + fs = init_task.fs; + current->fs = fs; + atomic_inc(&fs->count); + exit_files(current); + current->files = init_task.files; + atomic_inc(¤t->files->count); +} + /* * When we die, we re-parent all our children. * Try to give them to another thread in our thread diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/fork.c linux.21pre4-ac2/kernel/fork.c --- linux.21pre4/kernel/fork.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/fork.c 2003-01-28 16:16:56.000000000 +0000 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -30,7 +31,6 @@ /* The idle threads do not count.. */ int nr_threads; -int nr_running; int max_threads; unsigned long total_forks; /* Handle normal Linux uptimes. */ @@ -38,6 +38,8 @@ struct task_struct *pidhash[PIDHASH_SZ]; +rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ + void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) { unsigned long flags; @@ -143,6 +145,7 @@ { struct vm_area_struct * mpnt, *tmp, **pprev; int retval; + unsigned long charge = 0; flush_cache_mm(current->mm); mm->locked_vm = 0; @@ -171,6 +174,12 @@ retval = -ENOMEM; if(mpnt->vm_flags & VM_DONTCOPY) continue; + if(mpnt->vm_flags & VM_ACCOUNT) { + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; + if(!vm_enough_memory(len)) + goto fail_nomem; + charge += len; + } tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!tmp) goto fail_nomem; @@ -214,10 +223,12 @@ } retval = 0; build_mmap_rb(mm); - -fail_nomem: +out: flush_tlb_mm(current->mm); return retval; +fail_nomem: + vm_unacct_memory(charge); + goto out; } spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; @@ -578,6 +589,7 @@ struct pt_regs *regs, unsigned long stack_size) { int retval; + unsigned long flags; struct task_struct *p; struct completion vfork; @@ -638,8 +650,7 @@ if (p->pid == 0 && current->pid != 0) goto bad_fork_cleanup; - p->run_list.next = NULL; - p->run_list.prev = NULL; + INIT_LIST_HEAD(&p->run_list); p->p_cptr = NULL; init_waitqueue_head(&p->wait_chldexit); @@ -665,14 +676,15 @@ #ifdef CONFIG_SMP { int i; - p->cpus_runnable = ~0UL; - p->processor = current->processor; + /* ?? should we just memset this ?? 
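
daemonize() above collects the boilerplate a 2.4 kernel thread runs before entering its main loop, shedding the mm, files and fs it inherited from whoever spawned it. A typical usage sketch; my_thread, my_event and my_wait are hypothetical, and the usual signal-blocking setup is left out for brevity:

        #include <linux/sched.h>
        #include <linux/string.h>

        static DECLARE_WAIT_QUEUE_HEAD(my_wait);
        static int my_event;

        static int my_thread(void *unused)
        {
                /* drop the mm, files and fs inherited from the spawner */
                daemonize();
                strcpy(current->comm, "mythread");

                for (;;) {
                        if (wait_event_interruptible(my_wait, my_event))
                                break;          /* signalled */
                        my_event = 0;
                        /* ... do the actual work ... */
                }
                return 0;
        }

        /* started from module init with something like:
         *      kernel_thread(my_thread, NULL, CLONE_FS | CLONE_FILES);
         */
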
*/ for(i = 0; i < smp_num_cpus; i++) - p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0; + p->per_cpu_utime[cpu_logical_map(i)] = + p->per_cpu_stime[cpu_logical_map(i)] = 0; spin_lock_init(&p->sigmask_lock); } #endif + p->array = NULL; p->lock_depth = -1; /* -1 = no lock */ p->start_time = jiffies; @@ -706,15 +718,27 @@ p->pdeath_signal = 0; /* - * "share" dynamic priority between parent and child, thus the - * total amount of dynamic priorities in the system doesn't change, - * more scheduling fairness. This is only important in the first - * timeslice, on the long run the scheduling behaviour is unchanged. - */ - p->counter = (current->counter + 1) >> 1; - current->counter >>= 1; - if (!current->counter) - current->need_resched = 1; + * Share the timeslice between parent and child, thus the + * total amount of pending timeslices in the system doesnt change, + * resulting in more scheduling fairness. + */ + __save_flags(flags); + __cli(); + if (!current->time_slice) + BUG(); + p->time_slice = (current->time_slice + 1) >> 1; + current->time_slice >>= 1; + if (!current->time_slice) { + /* + * This case is rare, it happens when the parent has only + * a single jiffy left from its timeslice. Taking the + * runqueue lock is not a problem. + */ + current->time_slice = 1; + scheduler_tick(0,0); + } + p->sleep_timestamp = jiffies; + __restore_flags(flags); /* * Ok, add it to the run-queues and make it @@ -750,11 +774,16 @@ if (p->ptrace & PT_PTRACED) send_sig(SIGSTOP, p, 1); - - wake_up_process(p); /* do this last */ + wake_up_forked_process(p); /* do this last */ ++total_forks; if (clone_flags & CLONE_VFORK) wait_for_completion(&vfork); + else + /* + * Let the child process run first, to avoid most of the + * COW overhead when the child exec()s afterwards. + */ + current->need_resched = 1; fork_out: return retval; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/kmod.c linux.21pre4-ac2/kernel/kmod.c --- linux.21pre4/kernel/kmod.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/kmod.c 2003-01-30 16:55:56.000000000 +0000 @@ -119,15 +119,8 @@ if (curtask->files->fd[i]) close(i); } - /* Drop the "current user" thing */ - { - struct user_struct *user = curtask->user; - curtask->user = INIT_USER; - atomic_inc(&INIT_USER->__count); - atomic_inc(&INIT_USER->processes); - atomic_dec(&user->processes); - free_uid(user); - } + /* Become root */ + set_user(0, 0); /* Give kmod all effective privileges.. 
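
The do_fork() hunk above replaces the old counter-halving with an explicit split of the parent's remaining timeslice, so forking cannot manufacture CPU time: child and parent together end up with exactly what the parent had, and the rare case where the parent is left with nothing is handled just below by handing it one jiffy back through scheduler_tick(). A tiny standalone check of the split:

        #include <stdio.h>

        int main(void)
        {
                unsigned int parent, child;

                for (parent = 1; parent <= 6; parent++) {
                        /* same expressions as in do_fork() above */
                        child = (parent + 1) >> 1;
                        printf("parent had %u ticks -> child %u, parent keeps %u\n",
                               parent, child, parent >> 1);
                }
                return 0;
        }
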
*/ curtask->euid = curtask->fsuid = 0; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/ksyms.c linux.21pre4-ac2/kernel/ksyms.c --- linux.21pre4/kernel/ksyms.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/kernel/ksyms.c 2003-01-06 19:27:27.000000000 +0000 @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -65,6 +66,8 @@ extern int request_dma(unsigned int dmanr, char * deviceID); extern void free_dma(unsigned int dmanr); extern spinlock_t dma_spin_lock; +extern int panic_timeout; + #ifdef CONFIG_MODVERSIONS const struct module_symbol __export_Using_Versions @@ -110,6 +113,7 @@ EXPORT_SYMBOL(kfree); EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(__vmalloc); +EXPORT_SYMBOL(vcalloc); EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(mem_map); EXPORT_SYMBOL(remap_page_range); @@ -444,7 +448,9 @@ /* process management */ EXPORT_SYMBOL(complete_and_exit); EXPORT_SYMBOL(__wake_up); -EXPORT_SYMBOL(__wake_up_sync); +#if CONFIG_SMP +EXPORT_SYMBOL_GPL(__wake_up_sync); /* internal use only */ +#endif EXPORT_SYMBOL(wake_up_process); EXPORT_SYMBOL(sleep_on); EXPORT_SYMBOL(sleep_on_timeout); @@ -454,6 +460,12 @@ EXPORT_SYMBOL(schedule_timeout); EXPORT_SYMBOL(yield); EXPORT_SYMBOL(__cond_resched); +EXPORT_SYMBOL(set_user_nice); +EXPORT_SYMBOL(task_nice); +EXPORT_SYMBOL_GPL(idle_cpu); +#ifdef CONFIG_SMP +EXPORT_SYMBOL_GPL(set_cpus_allowed); +#endif EXPORT_SYMBOL(jiffies); EXPORT_SYMBOL(xtime); EXPORT_SYMBOL(do_gettimeofday); @@ -464,10 +476,12 @@ #endif EXPORT_SYMBOL(kstat); -EXPORT_SYMBOL(nr_running); +EXPORT_SYMBOL(nr_context_switches); /* misc */ EXPORT_SYMBOL(panic); +EXPORT_SYMBOL(panic_notifier_list); +EXPORT_SYMBOL(panic_timeout); EXPORT_SYMBOL(__out_of_line_bug); EXPORT_SYMBOL(sprintf); EXPORT_SYMBOL(snprintf); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/Makefile linux.21pre4-ac2/kernel/Makefile --- linux.21pre4/kernel/Makefile 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/Makefile 2003-01-06 19:27:43.000000000 +0000 @@ -9,7 +9,7 @@ O_TARGET := kernel.o -export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o +export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o printk.o cpufreq.o obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o \ module.o exit.o itimer.o info.o time.o softirq.o resource.o \ @@ -19,6 +19,8 @@ obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_MODULES) += ksyms.o obj-$(CONFIG_PM) += pm.o +obj-$(CONFIG_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_IKCONFIG) += configs.o ifneq ($(CONFIG_IA64),y) # According to Alan Modra , the -fno-omit-frame-pointer is @@ -26,7 +28,21 @@ # me. I suspect most platforms don't need this, but until we know that for sure # I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k # to get a correct value for the wait-channel (WCHAN in ps). --davidm +# +# Some gcc's are building so that O(1) scheduler is triple faulting if we +# build -O2. (Turns out to be a CPU issue. 
Update your microcode if you hit it) +# CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer endif include $(TOPDIR)/Rules.make + +configs.o: $(TOPDIR)/scripts/mkconfigs configs.c + $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DEXPORT_SYMTAB -c -o configs.o configs.c + +$(TOPDIR)/scripts/mkconfigs: $(TOPDIR)/scripts/mkconfigs.c + $(HOSTCC) $(HOSTCFLAGS) -o $(TOPDIR)/scripts/mkconfigs $(TOPDIR)/scripts/mkconfigs.c + +configs.c: $(TOPDIR)/.config $(TOPDIR)/scripts/mkconfigs + $(TOPDIR)/scripts/mkconfigs $(TOPDIR)/.config configs.c + diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/panic.c linux.21pre4-ac2/kernel/panic.c --- linux.21pre4/kernel/panic.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/panic.c 2003-01-06 15:41:58.000000000 +0000 @@ -16,6 +16,8 @@ #include #include #include +#include +#include asmlinkage void sys_sync(void); /* it's really int */ @@ -28,9 +30,155 @@ panic_timeout = simple_strtoul(str, NULL, 0); return 1; } - __setup("panic=", panic_setup); + +#if (defined(CONFIG_X86) && defined(CONFIG_VT)) || defined(CONFIG_PC_KEYB) +#define do_blink(x) pckbd_blink(x) +#else +#define do_blink(x) 0 +#endif + +#ifdef CONFIG_PANIC_MORSE + +static int blink_setting = 1; + +static const unsigned char morsetable[] = { + 0122, 0, 0310, 0, 0, 0163, /* "#$%&' */ + 055, 0155, 0, 0, 0163, 0141, 0152, 0051, /* ()*+,-./ */ + 077, 076, 074, 070, 060, 040, 041, 043, 047, 057, /* 0-9 */ + 0107, 0125, 0, 0061, 0, 0114, 0, /* :;<=>?@ */ + 006, 021, 025, 011, 002, 024, 013, 020, 004, /* A-I */ + 036, 015, 022, 007, 005, 017, 026, 033, 012, /* J-R */ + 010, 003, 014, 030, 016, 031, 035, 023, /* S-Z */ + 0, 0, 0, 0, 0154 /* [\]^_ */ +}; + +__inline__ unsigned char tomorse(char c) { + if (c >= 'a' && c <= 'z') + c = c - 'a' + 'A'; + if (c >= '"' && c <= '_') { + return morsetable[c - '"']; + } else + return 0; +} + + +#define DITLEN (HZ / 5) +#define DAHLEN 3 * DITLEN +#define SPACELEN 7 * DITLEN + +#define FREQ 844 + +/* Tell the user who may be running in X and not see the console that we have + panic'ed. This is to distingush panics from "real" lockups. + Could in theory send the panic message as morse, but that is left as an + exercise for the reader. + And now it's done! LED and speaker morse code by Andrew Rodland + , with improvements based on suggestions from + linux@horizon.com and a host of others. +*/ + +void panic_blink(char *buf) +{ + static unsigned long next_jiffie = 0; + static char * bufpos = 0; + static unsigned char morse = 0; + static char state = 1; + + if (!blink_setting) + return; + + if (!buf) + buf="Panic lost?"; + + + if (bufpos && time_after (next_jiffie, jiffies)) { + return; /* Waiting for something. */ + } + + if (state) { /* Coming off of a blink. */ + if (blink_setting & 0x01) + do_blink(0); + + state = 0; + + if(morse > 1) { /* Not done yet, just a one-dit pause. */ + next_jiffie = jiffies + DITLEN; + } else { /* Get a new char, and figure out how much space. */ + + if(!bufpos) + bufpos = (char *)buf; /* First time through */ + + if(!*bufpos) { + bufpos = (char *)buf; /* Repeating */ + next_jiffie = jiffies + SPACELEN; + } else { + /* Inter-letter space */ + next_jiffie = jiffies + DAHLEN; + } + + if (! (morse = tomorse(*bufpos) )) { + next_jiffie = jiffies + SPACELEN; + state = 1; /* And get us back here */ + } + bufpos ++; + } + } else { /* Starting a new blink. We have valid code in morse. */ + int len; + + len = (morse & 001) ? 
DAHLEN : DITLEN; + + if (blink_setting & 0x02) + kd_mksound(FREQ, len); + + next_jiffie = jiffies + len; + + if (blink_setting & 0x01) + do_blink(1); + state = 1; + morse >>= 1; + } +} + +#else /* CONFIG_PANIC_MORSE */ + +static int blink_setting = HZ / 2; /* Over here, it's jiffies between blinks. */ + +/* This is the "original" 2.4-ac panic_blink, rewritten to use my + * sorta-arch-independent do_blink stuff. + */ +void panic_blink(char *buf) { + static char state = 0; + static unsigned long next_jiffie = 0; + + if (!blink_setting) + return; + + if (jiffies >= next_jiffie) { + state ^= 1; + do_blink(state); + next_jiffie = jiffies + blink_setting; + } + + return; +} + +#endif /* CONFIG_PANIC_MORSE */ + +static int __init panicblink_setup(char *str) +{ + int par; + if (get_option(&str,&par)) + blink_setting = par; + return 1; +} + +/* panicblink=0 disables the blinking as it caused problems with some console + switches. */ +__setup("panicblink=", panicblink_setup); + + /** * panic - halt the system * @fmt: The text string to print @@ -96,10 +244,7 @@ #endif sti(); for(;;) { -#if defined(CONFIG_X86) && defined(CONFIG_VT) - extern void panic_blink(void); - panic_blink(); -#endif + panic_blink(buf); CHECK_EMERGENCY_SYNC } } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/printk.c linux.21pre4-ac2/kernel/printk.c --- linux.21pre4/kernel/printk.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/kernel/printk.c 2003-01-29 17:26:02.000000000 +0000 @@ -26,6 +26,7 @@ #include #include /* For in_interrupt() */ #include +#include #include @@ -529,6 +530,7 @@ if (must_wake_klogd && !oops_in_progress) wake_up_interruptible(&log_wait); } +EXPORT_SYMBOL(release_console_sem); /** console_conditional_schedule - yield the CPU if required * @@ -556,7 +558,14 @@ { struct console *c; - acquire_console_sem(); + /* + * Try to get the console semaphore. If someone else owns it + * we have to return without unblanking because console_unblank + * may be called in interrupt context. + */ + if (down_trylock(&console_sem) != 0) + return; + console_may_schedule = 0; for (c = console_drivers; c != NULL; c = c->next) if ((c->flags & CON_ENABLED) && c->unblank) c->unblank(); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/ptrace.c linux.21pre4-ac2/kernel/ptrace.c --- linux.21pre4/kernel/ptrace.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/ptrace.c 2003-01-06 19:27:43.000000000 +0000 @@ -31,20 +31,7 @@ if (child->state != TASK_STOPPED) return -ESRCH; #ifdef CONFIG_SMP - /* Make sure the child gets off its CPU.. 
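
The morse support above packs each character into a single byte: bits are consumed LSB-first, a set bit is a dah, a clear bit a dit, and the last remaining set bit is a sentinel marking the end of the letter, which is why panic_blink() keeps shifting only while morse > 1. The encoding is easy to verify outside the kernel; a userspace sketch reusing the table values for the letters A-E:

        #include <stdio.h>
        #include <ctype.h>

        /* same packed encoding as the kernel morsetable[] above:
         * LSB-first, 1 = dah, 0 = dit, topmost set bit is a sentinel */
        static const unsigned char morse_letters[] = {
                006, 021, 025, 011, 002,        /* A, B, C, D, E */
        };

        static void emit(unsigned char code)
        {
                while (code > 1) {              /* stop at the sentinel */
                        putchar((code & 1) ? '-' : '.');
                        code >>= 1;
                }
                putchar(' ');
        }

        int main(void)
        {
                const char *msg = "cab";

                while (*msg) {
                        int c = toupper(*msg++);
                        if (c >= 'A' && c <= 'E')
                                emit(morse_letters[c - 'A']);
                }
                putchar('\n');                  /* prints "-.-. .- -..." */
                return 0;
        }
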
*/ - for (;;) { - task_lock(child); - if (!task_has_cpu(child)) - break; - task_unlock(child); - do { - if (child->state != TASK_STOPPED) - return -ESRCH; - barrier(); - cpu_relax(); - } while (task_has_cpu(child)); - } - task_unlock(child); + wait_task_inactive(child); #endif } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/sched.c linux.21pre4-ac2/kernel/sched.c --- linux.21pre4/kernel/sched.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/kernel/sched.c 2003-01-30 16:55:56.000000000 +0000 @@ -1,760 +1,1052 @@ /* - * linux/kernel/sched.c + * kernel/sched.c * * Kernel scheduler and related syscalls * - * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1991-2002 Linus Torvalds * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff * by Andrea Arcangeli - * 1998-12-28 Implemented better SMP scheduling by Ingo Molnar + * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: + * hybrid priority-list and round-robin design with + * an array-switch method of distributing timeslices + * and per-CPU runqueues. Additional code by Davide + * Libenzi, Robert Love, and Rusty Russell. */ -/* - * 'sched.c' is the main kernel file. It contains scheduling primitives - * (sleep_on, wakeup, schedule etc) as well as a number of simple system - * call functions (type getpid()), which just extract a field from - * current-task - */ - -#include #include +#include #include +#include +#include #include -#include +#include #include -#include #include -#include -#include +#include -#include -#include +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], + * and back. + */ +#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) +#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) +#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) -extern void timer_bh(void); -extern void tqueue_bh(void); -extern void immediate_bh(void); +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. + */ +#define USER_PRIO(p) ((p)-MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* - * scheduler variables + * These are the 'tuning knobs' of the scheduler: + * + * Minimum timeslice is 10 msecs, default timeslice is 150 msecs, + * maximum timeslice is 300 msecs. Timeslices get refilled after + * they expire. */ +#define MIN_TIMESLICE ( 10 * HZ / 1000) +#define MAX_TIMESLICE (300 * HZ / 1000) +#define CHILD_PENALTY 95 +#define PARENT_PENALTY 100 +#define EXIT_WEIGHT 3 +#define PRIO_BONUS_RATIO 25 +#define INTERACTIVE_DELTA 2 +#define MAX_SLEEP_AVG (2*HZ) +#define STARVATION_LIMIT (2*HZ) + +/* + * If a task is 'interactive' then we reinsert it in the active + * array after it has expired its current timeslice. (it will not + * continue to run immediately, it will still roundrobin with + * other interactive tasks.) + * + * This part scales the interactivity limit depending on niceness. + * + * We scale it linearly, offset by the INTERACTIVE_DELTA delta. 
+ * Here are a few examples of different nice levels: + * + * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0] + * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0] + * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0] + * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0] + * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0] + * + * (the X axis represents the possible -5 ... 0 ... +5 dynamic + * priority range a task can explore, a value of '1' means the + * task is rated interactive.) + * + * Ie. nice +19 tasks can never get 'interactive' enough to be + * reinserted into the active array. And only heavily CPU-hog nice -20 + * tasks will be expired. Default nice 0 tasks are somewhere between, + * it takes some effort for them to get interactive, but it's not + * too hard. + */ + +#define SCALE(v1,v1_max,v2_max) \ + (v1) * (v2_max) / (v1_max) -unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ +#define DELTA(p) \ + (SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \ + INTERACTIVE_DELTA) -extern void mem_use(void); +#define TASK_INTERACTIVE(p) \ + ((p)->prio <= (p)->static_prio - DELTA(p)) /* - * Scheduling quanta. + * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ] + * to time slice values. * - * NOTE! The unix "nice" value influences how long a process - * gets. The nice value ranges from -20 to +19, where a -20 - * is a "high-priority" task, and a "+10" is a low-priority - * task. + * The higher a thread's priority, the bigger timeslices + * it gets during one round of execution. But even the lowest + * priority thread gets MIN_TIMESLICE worth of execution time. * - * We want the time-slice to be around 50ms or so, so this - * calculation depends on the value of HZ. + * task_timeslice() is the interface that is used by the scheduler. */ -#if HZ < 200 -#define TICK_SCALE(x) ((x) >> 2) -#elif HZ < 400 -#define TICK_SCALE(x) ((x) >> 1) -#elif HZ < 800 -#define TICK_SCALE(x) (x) -#elif HZ < 1600 -#define TICK_SCALE(x) ((x) << 1) -#else -#define TICK_SCALE(x) ((x) << 2) -#endif - -#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1) +#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \ + ((MAX_TIMESLICE - MIN_TIMESLICE) * \ + (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1))) +static inline unsigned int task_timeslice(task_t *p) +{ + return BASE_TIMESLICE(p); +} /* - * Init task must be ok at boot for the ix86 as we will check its signals - * via the SMP irq return path. + * These are the runqueue data structures: */ - -struct task_struct * init_tasks[NR_CPUS] = {&init_task, }; + +#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long)) + +typedef struct runqueue runqueue_t; + +struct prio_array { + int nr_active; + unsigned long bitmap[BITMAP_SIZE]; + struct list_head queue[MAX_PRIO]; +}; /* - * The tasklist_lock protects the linked list of processes. + * This is the main, per-CPU runqueue data structure. * - * The runqueue_lock locks the parts that actually access - * and change the run-queues, and have to be interrupt-safe. - * - * If both locks are to be concurrently held, the runqueue_lock - * nests inside the tasklist_lock. - * - * task->alloc_lock nests inside tasklist_lock. + * Locking rule: those places that want to lock multiple runqueues + * (such as the load balancing or the process migration code), lock + * acquire operations must be ordered by ascending &runqueue. 
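
BASE_TIMESLICE() above maps the static priority linearly onto [MIN_TIMESLICE, MAX_TIMESLICE], which is where the 10 ms / 150 ms / 300 ms figures in the comment come from. A standalone check of the formula; MAX_RT_PRIO = 100 and MAX_PRIO = 140 are not visible in this hunk and are assumed from the accompanying sched.h changes, as is HZ = 100:

        #include <stdio.h>

        #define HZ              100     /* assumption: i386 default */
        #define MAX_RT_PRIO     100     /* assumption: from sched.h */
        #define MAX_PRIO        140     /* assumption: MAX_RT_PRIO + 40 */

        #define MIN_TIMESLICE   ( 10 * HZ / 1000)
        #define MAX_TIMESLICE   (300 * HZ / 1000)
        #define MAX_USER_PRIO   (MAX_PRIO - MAX_RT_PRIO)
        #define NICE_TO_PRIO(n) (MAX_RT_PRIO + (n) + 20)

        /* BASE_TIMESLICE(), rewritten in terms of the static priority */
        static int timeslice(int nice)
        {
                int sp = NICE_TO_PRIO(nice);

                return MIN_TIMESLICE + (MAX_TIMESLICE - MIN_TIMESLICE) *
                        (MAX_PRIO - 1 - sp) / (MAX_USER_PRIO - 1);
        }

        int main(void)
        {
                printf("nice -20: %2d ticks = %3d ms\n",
                       timeslice(-20), timeslice(-20) * 1000 / HZ);
                printf("nice   0: %2d ticks = %3d ms\n",
                       timeslice(0), timeslice(0) * 1000 / HZ);
                printf("nice +19: %2d ticks = %3d ms\n",
                       timeslice(19), timeslice(19) * 1000 / HZ);
                return 0;       /* 300 ms, 150 ms, 10 ms */
        }
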
*/ -spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */ -rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ +struct runqueue { + spinlock_t lock; + unsigned long nr_running, nr_switches, expired_timestamp, + nr_uninterruptible; + task_t *curr, *idle; + prio_array_t *active, *expired, arrays[2]; + int prev_nr_running[NR_CPUS]; + task_t *migration_thread; + struct list_head migration_queue; +} ____cacheline_aligned; + +static struct runqueue runqueues[NR_CPUS] __cacheline_aligned; + +#define cpu_rq(cpu) (runqueues + (cpu)) +#define this_rq() cpu_rq(smp_processor_id()) +#define task_rq(p) cpu_rq(task_cpu(p)) +#define cpu_curr(cpu) (cpu_rq(cpu)->curr) +#define rt_task(p) ((p)->prio < MAX_RT_PRIO) + +/* + * Default context-switch locking: + */ +#ifndef prepare_arch_schedule +# define prepare_arch_schedule(prev) do { } while(0) +# define finish_arch_schedule(prev) do { } while(0) +# define prepare_arch_switch(rq) do { } while(0) +# define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock) +#endif -static LIST_HEAD(runqueue_head); /* - * We align per-CPU scheduling data on cacheline boundaries, - * to prevent cacheline ping-pong. + * task_rq_lock - lock the runqueue a given task resides on and disable + * interrupts. Note the ordering: we can safely lookup the task_rq without + * explicitly disabling preemption. */ -static union { - struct schedule_data { - struct task_struct * curr; - cycles_t last_schedule; - } schedule_data; - char __pad [SMP_CACHE_BYTES]; -} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}}; +static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +{ + struct runqueue *rq; -#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr -#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule +repeat_lock_task: + local_irq_save(*flags); + rq = task_rq(p); + spin_lock(&rq->lock); + if (unlikely(rq != task_rq(p))) { + spin_unlock_irqrestore(&rq->lock, *flags); + goto repeat_lock_task; + } + return rq; +} -struct kernel_stat kstat; -extern struct task_struct *child_reaper; +static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags) +{ + spin_unlock_irqrestore(&rq->lock, *flags); +} -#ifdef CONFIG_SMP +/* + * rq_lock - lock a given runqueue and disable interrupts. + */ +static inline runqueue_t *this_rq_lock(void) +{ + runqueue_t *rq; -#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)]) -#define can_schedule(p,cpu) \ - ((p)->cpus_runnable & (p)->cpus_allowed & (1UL << cpu)) + local_irq_disable(); + rq = this_rq(); + spin_lock(&rq->lock); -#else + return rq; +} -#define idle_task(cpu) (&init_task) -#define can_schedule(p,cpu) (1) +static inline void rq_unlock(runqueue_t *rq) +{ + spin_unlock_irq(&rq->lock); +} -#endif +/* + * Adding/removing a task to/from a priority array: + */ +static inline void dequeue_task(struct task_struct *p, prio_array_t *array) +{ + array->nr_active--; + list_del(&p->run_list); + if (list_empty(array->queue + p->prio)) + __clear_bit(p->prio, array->bitmap); +} -void scheduling_functions_start_here(void) { } +static inline void enqueue_task(struct task_struct *p, prio_array_t *array) +{ + list_add_tail(&p->run_list, array->queue + p->prio); + __set_bit(p->prio, array->bitmap); + array->nr_active++; + p->array = array; +} /* - * This is the function that decides how desirable a process is.. 
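
enqueue_task() and dequeue_task() above keep a bitmap of the priority lists that are non-empty, which is what makes picking the next task O(1): the scheduler only has to find the first set bit rather than walk every runnable task the way the removed goodness() loop did. A userspace toy of the same idea (not kernel code; a GCC builtin stands in for the kernel's find-first-bit helpers):

        #include <stdio.h>

        #define TOY_MAX_PRIO    140
        #define BITS_PER_LONG   (8 * sizeof(unsigned long))
        #define BITMAP_LONGS    ((TOY_MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

        static unsigned long bitmap[BITMAP_LONGS];
        static int nr_active[TOY_MAX_PRIO];     /* stand-in for the lists */

        static void toy_enqueue(int prio)
        {
                if (!nr_active[prio]++)
                        bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
        }

        static void toy_dequeue(int prio)
        {
                if (!--nr_active[prio])
                        bitmap[prio / BITS_PER_LONG] &= ~(1UL << (prio % BITS_PER_LONG));
        }

        static int toy_best_prio(void)          /* lowest bit = highest prio */
        {
                unsigned int i;

                for (i = 0; i < BITMAP_LONGS; i++)
                        if (bitmap[i])
                                return (int)(i * BITS_PER_LONG) +
                                        __builtin_ctzl(bitmap[i]);
                return -1;                      /* runqueue empty */
        }

        int main(void)
        {
                toy_enqueue(120);               /* a nice-0 task */
                toy_enqueue(139);               /* a nice-+19 task */
                toy_enqueue(50);                /* a real-time task */
                printf("next prio: %d\n", toy_best_prio());     /* 50 */
                toy_dequeue(50);
                printf("next prio: %d\n", toy_best_prio());     /* 120 */
                return 0;
        }
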
- * You can weigh different processes against each other depending - * on what CPU they've run on lately etc to try to handle cache - * and TLB miss penalties. + * effective_prio - return the priority that is based on the static + * priority but is modified by bonuses/penalties. + * + * We scale the actual sleep average [0 .... MAX_SLEEP_AVG] + * into the -5 ... 0 ... +5 bonus/penalty range. + * + * We use 25% of the full 0...39 priority range so that: + * + * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs. + * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks. * - * Return values: - * -1000: never select this - * 0: out of time, recalculate counters (but it might still be - * selected) - * +ve: "goodness" value (the larger, the better) - * +1000: realtime process, select this. + * Both properties are important to certain workloads. */ - -static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm) +static inline int effective_prio(task_t *p) { - int weight; + int bonus, prio; - /* - * select the current process after every other - * runnable process, but before the idle thread. - * Also, dont trigger a counter recalculation. - */ - weight = -1; - if (p->policy & SCHED_YIELD) - goto out; + bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 - + MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2; - /* - * Non-RT process - normal case first. - */ - if (p->policy == SCHED_OTHER) { + prio = p->static_prio - bonus; + if (prio < MAX_RT_PRIO) + prio = MAX_RT_PRIO; + if (prio > MAX_PRIO-1) + prio = MAX_PRIO-1; + return prio; +} + +/* + * activate_task - move a task to the runqueue. + * + * Also update all the scheduling statistics stuff. (sleep average + * calculation, priority modifiers, etc.) + */ +static inline void activate_task(task_t *p, runqueue_t *rq) +{ + unsigned long sleep_time = jiffies - p->sleep_timestamp; + prio_array_t *array = rq->active; + + if (!rt_task(p) && sleep_time) { /* - * Give the process a first-approximation goodness value - * according to the number of clock-ticks it has left. - * - * Don't do any other calculations if the time slice is - * over.. + * This code gives a bonus to interactive tasks. We update + * an 'average sleep time' value here, based on + * sleep_timestamp. The more time a task spends sleeping, + * the higher the average gets - and the higher the priority + * boost gets as well. */ - weight = p->counter; - if (!weight) - goto out; - -#ifdef CONFIG_SMP - /* Give a largish advantage to the same processor... */ - /* (this is equivalent to penalizing other processors) */ - if (p->processor == this_cpu) - weight += PROC_CHANGE_PENALTY; -#endif - - /* .. and a slight advantage to the current MM */ - if (p->mm == this_mm || !p->mm) - weight += 1; - weight += 20 - p->nice; - goto out; + p->sleep_avg += sleep_time; + if (p->sleep_avg > MAX_SLEEP_AVG) + p->sleep_avg = MAX_SLEEP_AVG; + p->prio = effective_prio(p); } - - /* - * Realtime process, select the first one on the - * runqueue (taking priorities within processes - * into account). - */ - weight = 1000 + p->rt_priority; -out: - return weight; + enqueue_task(p, array); + rq->nr_running++; } /* - * the 'goodness value' of replacing a process on a given CPU. - * positive value means 'replace', zero or negative means 'dont'. + * deactivate_task - remove a task from the runqueue. 
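
With the constants defined at the top of the file (PRIO_BONUS_RATIO = 25, MAX_SLEEP_AVG = 2*HZ) the bonus expression in effective_prio() above reduces to a linear map of sleep_avg onto the -5 ... +5 range promised in the comment: MAX_USER_PRIO * PRIO_BONUS_RATIO / 100 is 10, so the bonus is 10 * sleep_avg / MAX_SLEEP_AVG minus a constant 5. A standalone check, assuming MAX_USER_PRIO = 40 and HZ = 100:

        #include <stdio.h>

        #define HZ               100    /* assumption */
        #define MAX_USER_PRIO    40     /* assumption: MAX_PRIO - MAX_RT_PRIO */
        #define PRIO_BONUS_RATIO 25
        #define MAX_SLEEP_AVG    (2 * HZ)

        int main(void)
        {
                int sleep_avg;

                for (sleep_avg = 0; sleep_avg <= MAX_SLEEP_AVG; sleep_avg += HZ / 2) {
                        /* same integer expression as effective_prio() */
                        int bonus = MAX_USER_PRIO * PRIO_BONUS_RATIO * sleep_avg /
                                        MAX_SLEEP_AVG / 100 -
                                    MAX_USER_PRIO * PRIO_BONUS_RATIO / 100 / 2;
                        printf("sleep_avg %3d -> bonus %+d\n", sleep_avg, bonus);
                }
                return 0;       /* prints -5, -3, 0, +2, +5 */
        }
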
*/ -static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu) +static inline void deactivate_task(struct task_struct *p, runqueue_t *rq) { - return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm); + rq->nr_running--; + if (p->state == TASK_UNINTERRUPTIBLE) + rq->nr_uninterruptible++; + dequeue_task(p, p->array); + p->array = NULL; } /* - * This is ugly, but reschedule_idle() is very timing-critical. - * We are called with the runqueue spinlock held and we must - * not claim the tasklist_lock. + * resched_task - mark a task 'to be rescheduled now'. + * + * On UP this means the setting of the need_resched flag, on SMP it + * might also involve a cross-CPU call to trigger the scheduler on + * the target CPU. */ -static FASTCALL(void reschedule_idle(struct task_struct * p)); - -static void reschedule_idle(struct task_struct * p) +static inline void resched_task(task_t *p) { #ifdef CONFIG_SMP - int this_cpu = smp_processor_id(); - struct task_struct *tsk, *target_tsk; - int cpu, best_cpu, i, max_prio; - cycles_t oldest_idle; - - /* - * shortcut if the woken up task's last CPU is - * idle now. - */ - best_cpu = p->processor; - if (can_schedule(p, best_cpu)) { - tsk = idle_task(best_cpu); - if (cpu_curr(best_cpu) == tsk) { - int need_resched; -send_now_idle: - /* - * If need_resched == -1 then we can skip sending - * the IPI altogether, tsk->need_resched is - * actively watched by the idle thread. - */ - need_resched = tsk->need_resched; - tsk->need_resched = 1; - if ((best_cpu != this_cpu) && !need_resched) - smp_send_reschedule(best_cpu); - return; - } - } - - /* - * We know that the preferred CPU has a cache-affine current - * process, lets try to find a new idle CPU for the woken-up - * process. Select the least recently active idle CPU. (that - * one will have the least active cache context.) Also find - * the executing process which has the least priority. - */ - oldest_idle = (cycles_t) -1; - target_tsk = NULL; - max_prio = 0; - - for (i = 0; i < smp_num_cpus; i++) { - cpu = cpu_logical_map(i); - if (!can_schedule(p, cpu)) - continue; - tsk = cpu_curr(cpu); - /* - * We use the first available idle CPU. This creates - * a priority list between idle CPUs, but this is not - * a problem. - */ - if (tsk == idle_task(cpu)) { -#if defined(__i386__) && defined(CONFIG_SMP) - /* - * Check if two siblings are idle in the same - * physical package. Use them if found. 
- */ - if (smp_num_siblings == 2) { - if (cpu_curr(cpu_sibling_map[cpu]) == - idle_task(cpu_sibling_map[cpu])) { - oldest_idle = last_schedule(cpu); - target_tsk = tsk; - break; - } - - } -#endif - if (last_schedule(cpu) < oldest_idle) { - oldest_idle = last_schedule(cpu); - target_tsk = tsk; - } - } else { - if (oldest_idle == -1ULL) { - int prio = preemption_goodness(tsk, p, cpu); - - if (prio > max_prio) { - max_prio = prio; - target_tsk = tsk; - } - } - } - } - tsk = target_tsk; - if (tsk) { - if (oldest_idle != -1ULL) { - best_cpu = tsk->processor; - goto send_now_idle; - } - tsk->need_resched = 1; - if (tsk->processor != this_cpu) - smp_send_reschedule(tsk->processor); - } - return; - + int need_resched; -#else /* UP */ - int this_cpu = smp_processor_id(); - struct task_struct *tsk; - - tsk = cpu_curr(this_cpu); - if (preemption_goodness(tsk, p, this_cpu) > 0) - tsk->need_resched = 1; + need_resched = p->need_resched; + wmb(); + set_tsk_need_resched(p); + if (!need_resched && (task_cpu(p) != smp_processor_id())) + smp_send_reschedule(task_cpu(p)); +#else + set_tsk_need_resched(p); #endif } +#ifdef CONFIG_SMP + /* - * Careful! + * wait_task_inactive - wait for a thread to unschedule. * - * This has to add the process to the _end_ of the - * run-queue, not the beginning. The goodness value will - * determine whether this process will run next. This is - * important to get SCHED_FIFO and SCHED_RR right, where - * a process that is either pre-empted or its time slice - * has expired, should be moved to the tail of the run - * queue for its priority - Bhavesh Davda + * The caller must ensure that the task *will* unschedule sometime soon, + * else this function might spin for a *long* time. */ -static inline void add_to_runqueue(struct task_struct * p) +void wait_task_inactive(task_t * p) { - list_add_tail(&p->run_list, &runqueue_head); - nr_running++; + unsigned long flags; + runqueue_t *rq; + +repeat: + rq = task_rq(p); + if (unlikely(rq->curr == p)) { + cpu_relax(); + goto repeat; + } + rq = task_rq_lock(p, &flags); + if (unlikely(rq->curr == p)) { + task_rq_unlock(rq, &flags); + goto repeat; + } + task_rq_unlock(rq, &flags); } -static inline void move_last_runqueue(struct task_struct * p) +/* + * kick_if_running - kick the remote CPU if the task is running currently. + * + * This code is used by the signal code to signal tasks + * which are in user-mode, as quickly as possible. + * + * (Note that we do this lockless - if the task does anything + * while the message is in flight then it will notice the + * sigpending condition anyway.) + */ +void kick_if_running(task_t * p) { - list_del(&p->run_list); - list_add_tail(&p->run_list, &runqueue_head); + if (p == task_rq(p)->curr) + resched_task(p); } +#endif -/* - * Wake up a process. Put it on the run-queue if it's not - * already there. The "current" process is always on the - * run-queue (except when the actual re-schedule is in - * progress), and as such you're allowed to do the simpler - * "current->state = TASK_RUNNING" to mark yourself runnable - * without the overhead of this. +/*** + * try_to_wake_up - wake up a thread + * @p: the to-be-woken-up thread + * @sync: do a synchronous wakeup? + * + * Put it on the run-queue if it's not already there. The "current" + * thread is always on the run-queue (except when the actual + * re-schedule is in progress), and as such you're allowed to do + * the simpler "current->state = TASK_RUNNING" to mark yourself + * runnable without the overhead of this. 
+ * + * returns failure only if the task is already active. */ -static inline int try_to_wake_up(struct task_struct * p, int synchronous) +static int try_to_wake_up(task_t * p, int sync) { unsigned long flags; int success = 0; + long old_state; + runqueue_t *rq; - /* - * We want the common case fall through straight, thus the goto. - */ - spin_lock_irqsave(&runqueue_lock, flags); +repeat_lock_task: + rq = task_rq_lock(p, &flags); + old_state = p->state; + if (!p->array) { + /* + * Fast-migrate the task if it's not running or runnable + * currently. Do not violate hard affinity. + */ + if (unlikely(sync && (rq->curr != p) && + (task_cpu(p) != smp_processor_id()) && + (p->cpus_allowed & (1UL << smp_processor_id())))) { + + set_task_cpu(p, smp_processor_id()); + task_rq_unlock(rq, &flags); + goto repeat_lock_task; + } + if (old_state == TASK_UNINTERRUPTIBLE) + rq->nr_uninterruptible--; + activate_task(p, rq); + if (p->prio < rq->curr->prio) + resched_task(rq->curr); + success = 1; + } p->state = TASK_RUNNING; - if (task_on_runqueue(p)) - goto out; - add_to_runqueue(p); - if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id()))) - reschedule_idle(p); - success = 1; -out: - spin_unlock_irqrestore(&runqueue_lock, flags); + task_rq_unlock(rq, &flags); + return success; } -inline int wake_up_process(struct task_struct * p) +int wake_up_process(task_t * p) { return try_to_wake_up(p, 0); } -static void process_timeout(unsigned long __data) +/* + * wake_up_forked_process - wake up a freshly forked process. + * + * This function will do some initial scheduler statistics housekeeping + * that must be done for every newly created process. + */ +void wake_up_forked_process(task_t * p) { - struct task_struct * p = (struct task_struct *) __data; + runqueue_t *rq = this_rq_lock(); - wake_up_process(p); + p->state = TASK_RUNNING; + if (!rt_task(p)) { + /* + * We decrease the sleep average of forking parents + * and children as well, to keep max-interactive tasks + * from forking tasks that are max-interactive. + */ + current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100; + p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100; + p->prio = effective_prio(p); + } + set_task_cpu(p, smp_processor_id()); + activate_task(p, rq); + + rq_unlock(rq); +} + +/* + * Potentially available exiting-child timeslices are + * retrieved here - this way the parent does not get + * penalized for creating too many threads. + * + * (this cannot be used to 'generate' timeslices + * artificially, because any timeslice recovered here + * was given away by the parent in the first place.) + */ +void sched_exit(task_t * p) +{ + __cli(); + current->time_slice += p->time_slice; + if (unlikely(current->time_slice > MAX_TIMESLICE)) + current->time_slice = MAX_TIMESLICE; + __sti(); + /* + * If the child was a (relative-) CPU hog then decrease + * the sleep_avg of the parent as well. + */ + if (p->sleep_avg < current->sleep_avg) + current->sleep_avg = (current->sleep_avg * EXIT_WEIGHT + + p->sleep_avg) / (EXIT_WEIGHT + 1); } /** - * schedule_timeout - sleep until timeout - * @timeout: timeout value in jiffies - * - * Make the current task sleep until @timeout jiffies have - * elapsed. The routine will return immediately unless - * the current task state has been set (see set_current_state()). + * schedule_tail - first thing a freshly forked thread must call. * - * You can set the task state as follows - - * - * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to - * pass before the routine returns. 
The routine will return 0 - * - * %TASK_INTERRUPTIBLE - the routine may return early if a signal is - * delivered to the current task. In this case the remaining time - * in jiffies will be returned, or 0 if the timer expired in time - * - * The current task state is guaranteed to be TASK_RUNNING when this - * routine returns. - * - * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule - * the CPU away without a bound on the timeout. In this case the return - * value will be %MAX_SCHEDULE_TIMEOUT. - * - * In all cases the return value is guaranteed to be non-negative. + * @prev: the thread we just switched away from. */ -signed long schedule_timeout(signed long timeout) +#if CONFIG_SMP +asmlinkage void schedule_tail(task_t *prev) { - struct timer_list timer; - unsigned long expire; - - switch (timeout) - { - case MAX_SCHEDULE_TIMEOUT: - /* - * These two special cases are useful to be comfortable - * in the caller. Nothing more. We could take - * MAX_SCHEDULE_TIMEOUT from one of the negative value - * but I' d like to return a valid offset (>=0) to allow - * the caller to do everything it want with the retval. - */ - schedule(); - goto out; - default: - /* - * Another bit of PARANOID. Note that the retval will be - * 0 since no piece of kernel is supposed to do a check - * for a negative retval of schedule_timeout() (since it - * should never happens anyway). You just have the printk() - * that will tell you if something is gone wrong and where. - */ - if (timeout < 0) - { - printk(KERN_ERR "schedule_timeout: wrong timeout " - "value %lx from %p\n", timeout, - __builtin_return_address(0)); - current->state = TASK_RUNNING; - goto out; - } - } + finish_arch_switch(this_rq()); + finish_arch_schedule(prev); +} +#endif - expire = timeout + jiffies; +/* + * context_switch - switch to the new MM and the new + * thread's register state. + */ +static inline task_t * context_switch(task_t *prev, task_t *next) +{ + struct mm_struct *mm = next->mm; + struct mm_struct *oldmm = prev->active_mm; - init_timer(&timer); - timer.expires = expire; - timer.data = (unsigned long) current; - timer.function = process_timeout; + if (unlikely(!mm)) { + next->active_mm = oldmm; + atomic_inc(&oldmm->mm_count); + enter_lazy_tlb(oldmm, next, smp_processor_id()); + } else + switch_mm(oldmm, mm, next, smp_processor_id()); - add_timer(&timer); - schedule(); - del_timer_sync(&timer); + if (unlikely(!prev->mm)) { + prev->active_mm = NULL; + mmdrop(oldmm); + } - timeout = expire - jiffies; + /* Here we just switch the register state and the stack. */ + switch_to(prev, next, prev); - out: - return timeout < 0 ? 0 : timeout; + return prev; } /* - * schedule_tail() is getting called from the fork return path. This - * cleans up all remaining scheduler things, without impacting the - * common case. + * nr_running, nr_uninterruptible and nr_context_switches: + * + * externally visible scheduler statistics: current number of runnable + * threads, current number of uninterruptible-sleeping threads, total + * number of context switches performed since bootup. */ -static inline void __schedule_tail(struct task_struct *prev) +unsigned long nr_running(void) { -#ifdef CONFIG_SMP - int policy; - - /* - * prev->policy can be written from here only before `prev' - * can be scheduled (before setting prev->cpus_runnable to ~0UL). - * Of course it must also be read before allowing prev - * to be rescheduled, but since the write depends on the read - * to complete, wmb() is enough. 
(the spin_lock() acquired - * before setting cpus_runnable is not enough because the spin_lock() - * common code semantics allows code outside the critical section - * to enter inside the critical section) - */ - policy = prev->policy; - prev->policy = policy & ~SCHED_YIELD; - wmb(); + unsigned long i, sum = 0; - /* - * fast path falls through. We have to clear cpus_runnable before - * checking prev->state to avoid a wakeup race. Protect against - * the task exiting early. - */ - task_lock(prev); - task_release_cpu(prev); - mb(); - if (prev->state == TASK_RUNNING) - goto needs_resched; + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_running; -out_unlock: - task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */ - return; + return sum; +} - /* - * Slow path - we 'push' the previous process and - * reschedule_idle() will attempt to find a new - * processor for it. (but it might preempt the - * current process as well.) We must take the runqueue - * lock and re-check prev->state to be correct. It might - * still happen that this process has a preemption - * 'in progress' already - but this is not a problem and - * might happen in other circumstances as well. - */ -needs_resched: - { - unsigned long flags; +unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; - /* - * Avoid taking the runqueue lock in cases where - * no preemption-check is necessery: - */ - if ((prev == idle_task(smp_processor_id())) || - (policy & SCHED_YIELD)) - goto out_unlock; + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible; - spin_lock_irqsave(&runqueue_lock, flags); - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev)) - reschedule_idle(prev); - spin_unlock_irqrestore(&runqueue_lock, flags); - goto out_unlock; - } -#else - prev->policy &= ~SCHED_YIELD; -#endif /* CONFIG_SMP */ + return sum; } -asmlinkage void schedule_tail(struct task_struct *prev) +unsigned long nr_context_switches(void) { - __schedule_tail(prev); + unsigned long i, sum = 0; + + for (i = 0; i < smp_num_cpus; i++) + sum += cpu_rq(cpu_logical_map(i))->nr_switches; + + return sum; } /* - * 'schedule()' is the scheduler function. It's a very simple and nice - * scheduler: it's not perfect, but certainly works for most things. - * - * The goto is "interesting". + * double_rq_lock - safely lock two runqueues * - * NOTE!! Task 0 is the 'idle' task, which gets called when no other - * tasks can run. It can not be killed, and it cannot sleep. The 'state' - * information in task[0] is never used. + * Note this does not disable interrupts like task_rq_lock, + * you need to do so manually before calling. */ -asmlinkage void schedule(void) +static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) { - struct schedule_data * sched_data; - struct task_struct *prev, *next, *p; - struct list_head *tmp; - int this_cpu, c; - - - spin_lock_prefetch(&runqueue_lock); + if (rq1 == rq2) + spin_lock(&rq1->lock); + else { + if (rq1 < rq2) { + spin_lock(&rq1->lock); + spin_lock(&rq2->lock); + } else { + spin_lock(&rq2->lock); + spin_lock(&rq1->lock); + } + } +} - BUG_ON(!current->active_mm); -need_resched_back: - prev = current; - this_cpu = prev->processor; +/* + * double_rq_unlock - safely unlock two runqueues + * + * Note this does not restore interrupts like task_rq_unlock, + * you need to do so manually after calling. 
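The same lock-ordering rule as double_rq_lock()/double_rq_unlock() above, in a self-contained userspace model with pthread mutexes standing in for the runqueue spinlocks (build with -pthread). The lower-addressed lock is always taken first, so two CPUs locking the same pair of runqueues in opposite argument order cannot deadlock. Names are illustrative.

#include <pthread.h>
#include <stdio.h>

struct rq_model {
    pthread_mutex_t lock;
    int nr_running;
};

static void double_lock(struct rq_model *rq1, struct rq_model *rq2)
{
    if (rq1 == rq2) {
        pthread_mutex_lock(&rq1->lock);
    } else if (rq1 < rq2) {
        pthread_mutex_lock(&rq1->lock); /* lower address first */
        pthread_mutex_lock(&rq2->lock);
    } else {
        pthread_mutex_lock(&rq2->lock); /* same order, either way round */
        pthread_mutex_lock(&rq1->lock);
    }
}

static void double_unlock(struct rq_model *rq1, struct rq_model *rq2)
{
    pthread_mutex_unlock(&rq1->lock);
    if (rq1 != rq2)
        pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
    struct rq_model rq0 = { PTHREAD_MUTEX_INITIALIZER, 3 };
    struct rq_model rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };

    double_lock(&rq1, &rq0);            /* "wrong" argument order is fine */
    rq0.nr_running--;                   /* pull one task across */
    rq1.nr_running++;
    double_unlock(&rq1, &rq0);

    printf("rq0=%d rq1=%d\n", rq0.nr_running, rq1.nr_running);
    return 0;
}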
+ */ +static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2) +{ + spin_unlock(&rq1->lock); + if (rq1 != rq2) + spin_unlock(&rq2->lock); +} - if (unlikely(in_interrupt())) { - printk("Scheduling in interrupt\n"); - BUG(); +#if CONFIG_SMP +/* + * double_lock_balance - lock the busiest runqueue + * + * this_rq is locked already. Recalculate nr_running if we have to + * drop the runqueue lock. + */ +static inline unsigned int double_lock_balance(runqueue_t *this_rq, + runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running) +{ + if (unlikely(!spin_trylock(&busiest->lock))) { + if (busiest < this_rq) { + spin_unlock(&this_rq->lock); + spin_lock(&busiest->lock); + spin_lock(&this_rq->lock); + /* Need to recalculate nr_running */ + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu])) + nr_running = this_rq->nr_running; + else + nr_running = this_rq->prev_nr_running[this_cpu]; + } else + spin_lock(&busiest->lock); } + return nr_running; +} - release_kernel_lock(prev, this_cpu); +#include +/* + * Current runqueue is empty, or rebalance tick: if there is an + * inbalance (current runqueue is too short) then pull from + * busiest runqueue(s). + * + * We call this with the current runqueue locked, + * irqs disabled. + */ +static void load_balance(runqueue_t *this_rq, int idle) +{ + int imbalance, nr_running, load, max_load, + idx, i, this_cpu = smp_processor_id(); + task_t *next = this_rq->idle, *tmp; + runqueue_t *busiest, *rq_src; + prio_array_t *array; + struct list_head *head, *curr; + + /* + * Handle platform specific balancing operations, such as + * hyperthreading. + */ + + if(arch_load_balance(this_cpu, idle)) + return; + /* - * 'sched_data' is protected by the fact that we can run - * only one process per CPU. + * We search all runqueues to find the most busy one. + * We do this lockless to reduce cache-bouncing overhead, + * we re-check the 'best' source CPU later on again, with + * the lock held. + * + * We fend off statistical fluctuations in runqueue lengths by + * saving the runqueue length during the previous load-balancing + * operation and using the smaller one the current and saved lengths. + * If a runqueue is long enough for a longer amount of time then + * we recognize it and pull tasks from it. + * + * The 'current runqueue length' is a statistical maximum variable, + * for that one we take the longer one - to avoid fluctuations in + * the other direction. So for a load-balance to happen it needs + * stable long runqueue on the target CPU and stable short runqueue + * on the local runqueue. + * + * We make an exception if this CPU is about to become idle - in + * that case we are less picky about moving a task across CPUs and + * take what can be taken. */ - sched_data = & aligned_data[this_cpu].schedule_data; + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu])) + nr_running = this_rq->nr_running; + else + nr_running = this_rq->prev_nr_running[this_cpu]; - spin_lock_irq(&runqueue_lock); + busiest = NULL; + max_load = 1; + for (i = 0; i < smp_num_cpus; i++) { + int logical = cpu_logical_map(i); - /* move an exhausted RR process to be last.. 
*/ - if (unlikely(prev->policy == SCHED_RR)) - if (!prev->counter) { - prev->counter = NICE_TO_TICKS(prev->nice); - move_last_runqueue(prev); + rq_src = cpu_rq(logical); + if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical])) + load = rq_src->nr_running; + else + load = this_rq->prev_nr_running[logical]; + this_rq->prev_nr_running[logical] = rq_src->nr_running; + + if ((load > max_load) && (rq_src != this_rq)) { + busiest = rq_src; + max_load = load; } - - switch (prev->state) { - case TASK_INTERRUPTIBLE: - if (signal_pending(prev)) { - prev->state = TASK_RUNNING; - break; - } - default: - del_from_runqueue(prev); - case TASK_RUNNING:; } - prev->need_resched = 0; + if (likely(!busiest)) + return; + + imbalance = (max_load - nr_running) / 2; + + /* It needs an at least ~25% imbalance to trigger balancing. */ + if (!idle && (imbalance < (max_load + 3)/4)) + return; + + nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running); /* - * this is the scheduler proper: + * Make sure nothing changed since we checked the + * runqueue length. */ + if (busiest->nr_running <= nr_running + 1) + goto out_unlock; -repeat_schedule: /* - * Default process to select.. + * We first consider expired tasks. Those will likely not be + * executed in the near future, and they are most likely to + * be cache-cold, thus switching CPUs has the least effect + * on them. */ - next = idle_task(this_cpu); - c = -1000; - list_for_each(tmp, &runqueue_head) { - p = list_entry(tmp, struct task_struct, run_list); - if (can_schedule(p, this_cpu)) { - int weight = goodness(p, this_cpu, prev->active_mm); - if (weight > c) - c = weight, next = p; + if (busiest->expired->nr_active) + array = busiest->expired; + else + array = busiest->active; + +new_array: + /* Start searching at priority 0: */ + idx = 0; +skip_bitmap: + if (!idx) + idx = sched_find_first_bit(array->bitmap); + else + idx = find_next_bit(array->bitmap, MAX_PRIO, idx); + if (idx == MAX_PRIO) { + if (array == busiest->expired) { + array = busiest->active; + goto new_array; } + goto out_unlock; } - /* Do we need to re-calculate counters? */ - if (unlikely(!c)) { - struct task_struct *p; - - spin_unlock_irq(&runqueue_lock); - read_lock(&tasklist_lock); - for_each_task(p) - p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice); - read_unlock(&tasklist_lock); - spin_lock_irq(&runqueue_lock); - goto repeat_schedule; + head = array->queue + idx; + curr = head->prev; +skip_queue: + tmp = list_entry(curr, task_t, run_list); + + /* + * We do not migrate tasks that are: + * 1) running (obviously), or + * 2) cannot be migrated to this CPU due to cpus_allowed, or + * 3) are cache-hot on their current CPU. 
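A quick numeric check of the imbalance threshold in load_balance() above (userspace, illustrative; only the "~25% imbalance" test is modeled, not the busiest-queue search or the lock retaking in double_lock_balance()).

#include <stdio.h>

static int would_balance(int max_load, int nr_running, int idle)
{
    int imbalance = (max_load - nr_running) / 2;

    /* mirrors: if (!idle && (imbalance < (max_load + 3)/4)) return; */
    if (!idle && imbalance < (max_load + 3) / 4)
        return 0;
    return 1;
}

int main(void)
{
    /* busiest=8, local=6: imbalance=1, threshold=(8+3)/4=2 -> no pull */
    printf("8 vs 6: %d\n", would_balance(8, 6, 0));
    /* busiest=8, local=4: imbalance=2, threshold=2 -> pull */
    printf("8 vs 4: %d\n", would_balance(8, 4, 0));
    /* an idle CPU skips the threshold and takes what it can get */
    printf("2 vs 0 (idle): %d\n", would_balance(2, 0, 1));
    return 0;
}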
+ */ + +#define CAN_MIGRATE_TASK(p,rq,this_cpu) \ + ((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \ + ((p) != (rq)->curr) && \ + ((p)->cpus_allowed & (1UL << (this_cpu)))) + + if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) { + curr = curr->next; + if (curr != head) + goto skip_queue; + idx++; + goto skip_bitmap; + } + next = tmp; + /* + * take the task out of the other runqueue and + * put it into this one: + */ + dequeue_task(next, array); + busiest->nr_running--; + set_task_cpu(next, this_cpu); + this_rq->nr_running++; + enqueue_task(next, this_rq->active); + if (next->prio < current->prio) + set_need_resched(); + if (!idle && --imbalance) { + if (array == busiest->expired) { + array = busiest->active; + goto new_array; + } } +out_unlock: + spin_unlock(&busiest->lock); +} - /* - * from this point on nothing can prevent us from - * switching to the next task, save this fact in - * sched_data. - */ - sched_data->curr = next; - task_set_cpu(next, this_cpu); - spin_unlock_irq(&runqueue_lock); +/* + * One of the idle_cpu_tick() and busy_cpu_tick() functions will + * get called every timer tick, on every CPU. Our balancing action + * frequency and balancing agressivity depends on whether the CPU is + * idle or not. + * + * busy-rebalance every 250 msecs. idle-rebalance every 1 msec. (or on + * systems with HZ=100, every 10 msecs.) + */ +#define BUSY_REBALANCE_TICK (HZ/4 ?: 1) +#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1) - if (unlikely(prev == next)) { - /* We won't go through the normal tail, so do this by hand */ - prev->policy &= ~SCHED_YIELD; - goto same_process; - } +static inline void idle_tick(void) +{ + if (jiffies % IDLE_REBALANCE_TICK) + return; + spin_lock(&this_rq()->lock); + load_balance(this_rq(), 1); + spin_unlock(&this_rq()->lock); +} -#ifdef CONFIG_SMP - /* - * maintain the per-process 'last schedule' value. - * (this has to be recalculated even if we reschedule to - * the same process) Currently this is only used on SMP, - * and it's approximate, so we do not have to maintain - * it while holding the runqueue spinlock. - */ - sched_data->last_schedule = get_cycles(); +#endif - /* - * We drop the scheduler lock early (it's a global spinlock), - * thus we have to lock the previous process from getting - * rescheduled during switch_to(). - */ +/* + * We place interactive tasks back into the active array, if possible. + * + * To guarantee that this does not starve expired tasks we ignore the + * interactivity of a task if the first expired task had to wait more + * than a 'reasonable' amount of time. This deadline timeout is + * load-dependent, as the frequency of array switched decreases with + * increasing number of running tasks: + */ +#define EXPIRED_STARVING(rq) \ + ((rq)->expired_timestamp && \ + (jiffies - (rq)->expired_timestamp >= \ + STARVATION_LIMIT * ((rq)->nr_running) + 1)) -#endif /* CONFIG_SMP */ +/* + * This function gets called by the timer code, with HZ frequency. + * We call it with interrupts disabled. + */ +void scheduler_tick(int user_tick, int system) +{ + int cpu = smp_processor_id(); + runqueue_t *rq = this_rq(); + task_t *p = current; - kstat.context_swtch++; - /* - * there are 3 processes which are affected by a context switch: - * - * prev == .... ==> (last => next) - * - * It's the 'much more previous' 'prev' that is on next's stack, - * but prev is set to (the just run) 'last' process by switch_to(). - * This might sound slightly confusing but makes tons of sense. 
- */ - prepare_to_switch(); - { - struct mm_struct *mm = next->mm; - struct mm_struct *oldmm = prev->active_mm; - if (!mm) { - BUG_ON(next->active_mm); - next->active_mm = oldmm; - atomic_inc(&oldmm->mm_count); - enter_lazy_tlb(oldmm, next, this_cpu); - } else { - BUG_ON(next->active_mm != mm); - switch_mm(oldmm, mm, next, this_cpu); + if (p == rq->idle) { + if (local_bh_count(cpu) || local_irq_count(cpu) > 1) + kstat.per_cpu_system[cpu] += system; +#if CONFIG_SMP + idle_tick(); +#endif + return; + } + if (TASK_NICE(p) > 0) + kstat.per_cpu_nice[cpu] += user_tick; + else + kstat.per_cpu_user[cpu] += user_tick; + kstat.per_cpu_system[cpu] += system; + + /* Task might have expired already, but not scheduled off yet */ + if (p->array != rq->active) { + set_tsk_need_resched(p); + return; + } + spin_lock(&rq->lock); + if (unlikely(rt_task(p))) { + /* + * RR tasks need a special form of timeslice management. + * FIFO tasks have no timeslices. + */ + if ((p->policy == SCHED_RR) && !--p->time_slice) { + p->time_slice = task_timeslice(p); + set_tsk_need_resched(p); + + /* put it at the end of the queue: */ + dequeue_task(p, rq->active); + enqueue_task(p, rq->active); } + goto out; + } + /* + * The task was running during this tick - update the + * time slice counter and the sleep average. Note: we + * do not update a process's priority until it either + * goes to sleep or uses up its timeslice. This makes + * it possible for interactive tasks to use up their + * timeslices at their highest priority levels. + */ + if (p->sleep_avg) + p->sleep_avg--; + if (!--p->time_slice) { + dequeue_task(p, rq->active); + set_tsk_need_resched(p); + p->prio = effective_prio(p); + p->time_slice = task_timeslice(p); + + if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) { + if (!rq->expired_timestamp) + rq->expired_timestamp = jiffies; + enqueue_task(p, rq->expired); + } else + enqueue_task(p, rq->active); + } +out: +#if CONFIG_SMP + if (!(jiffies % BUSY_REBALANCE_TICK)) + load_balance(rq, 0); +#endif + spin_unlock(&rq->lock); +} + +void scheduling_functions_start_here(void) { } + +/* + * schedule() is the main scheduler function. + */ +asmlinkage void schedule(void) +{ + task_t *prev, *next; + runqueue_t *rq; + prio_array_t *array; + struct list_head *queue; + int idx; + + if (unlikely(in_interrupt())) + BUG(); + +need_resched: + prev = current; + rq = this_rq(); + + release_kernel_lock(prev, smp_processor_id()); + prepare_arch_schedule(prev); + prev->sleep_timestamp = jiffies; + spin_lock_irq(&rq->lock); - if (!prev->mm) { - prev->active_mm = NULL; - mmdrop(oldmm); + switch (prev->state) { + case TASK_INTERRUPTIBLE: + if (unlikely(signal_pending(prev))) { + prev->state = TASK_RUNNING; + break; } + default: + deactivate_task(prev, rq); + case TASK_RUNNING: + ; + } +#if CONFIG_SMP +pick_next_task: +#endif + if (unlikely(!rq->nr_running)) { +#if CONFIG_SMP + load_balance(rq, 1); + if (rq->nr_running) + goto pick_next_task; +#endif + next = rq->idle; + rq->expired_timestamp = 0; + goto switch_tasks; } - /* - * This just switches the register state and the - * stack. - */ - switch_to(prev, next, prev); - __schedule_tail(prev); + array = rq->active; + if (unlikely(!array->nr_active)) { + /* + * Switch the active and expired arrays. 
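A self-contained userspace model of the pick-next path that follows: a bitmap of non-empty priority queues scanned with a find-first-bit, plus an active/expired array pair that is swapped once the active array drains. The types and the 8-level bitmap are simplified stand-ins, not the kernel's.

#include <stdio.h>
#include <string.h>

#define MAX_PRIO_MODEL 8                /* tiny bitmap for the demo */

struct prio_array_model {
    unsigned int bitmap;                /* bit p set => queue_len[p] non-empty */
    int nr_active;
    int queue_len[MAX_PRIO_MODEL];      /* stand-in for the per-prio lists */
};

static int find_first_bit_model(unsigned int bitmap)
{
    int i;

    for (i = 0; i < MAX_PRIO_MODEL; i++)
        if (bitmap & (1u << i))
            return i;
    return MAX_PRIO_MODEL;
}

static void enqueue_model(struct prio_array_model *a, int prio)
{
    a->queue_len[prio]++;
    a->nr_active++;
    a->bitmap |= 1u << prio;
}

static int pick_next_model(struct prio_array_model **active,
                           struct prio_array_model **expired)
{
    struct prio_array_model *a = *active;
    int prio;

    if (!a->nr_active) {                /* active drained: swap the arrays */
        *active = *expired;
        *expired = a;
        a = *active;
    }
    prio = find_first_bit_model(a->bitmap);
    if (prio == MAX_PRIO_MODEL)
        return -1;                      /* both arrays empty: would run rq->idle */
    a->queue_len[prio]--;
    a->nr_active--;
    if (!a->queue_len[prio])
        a->bitmap &= ~(1u << prio);
    return prio;
}

int main(void)
{
    struct prio_array_model arrays[2];
    struct prio_array_model *active = &arrays[0], *expired = &arrays[1];

    memset(arrays, 0, sizeof(arrays));
    enqueue_model(active, 5);
    enqueue_model(active, 2);
    enqueue_model(expired, 3);          /* already used up its timeslice */

    printf("%d\n", pick_next_model(&active, &expired));    /* 2 */
    printf("%d\n", pick_next_model(&active, &expired));    /* 5 */
    printf("%d\n", pick_next_model(&active, &expired));    /* 3, after the swap */
    return 0;
}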
+ */ + rq->active = rq->expired; + rq->expired = array; + array = rq->active; + rq->expired_timestamp = 0; + } + + idx = sched_find_first_bit(array->bitmap); + queue = array->queue + idx; + next = list_entry(queue->next, task_t, run_list); + +switch_tasks: + prefetch(next); + clear_tsk_need_resched(prev); + + if (likely(prev != next)) { + rq->nr_switches++; + rq->curr = next; + + prepare_arch_switch(rq); + prev = context_switch(prev, next); + barrier(); + rq = this_rq(); + finish_arch_switch(rq); + } else + spin_unlock_irq(&rq->lock); + finish_arch_schedule(prev); -same_process: reacquire_kernel_lock(current); - if (current->need_resched) - goto need_resched_back; - return; + if (need_resched()) + goto need_resched; } /* - * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything - * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the - * non-exclusive tasks and one exclusive task. + * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just + * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve + * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already - * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero - * in this (rare) case, and we handle it by contonuing to scan the queue. + * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns + * zero in this (rare) case, and we handle it by continuing to scan the queue. */ -static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode, - int nr_exclusive, const int sync) +static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync) { struct list_head *tmp; - struct task_struct *p; - - CHECK_MAGIC_WQHEAD(q); - WQ_CHECK_LIST_HEAD(&q->task_list); - - list_for_each(tmp,&q->task_list) { - unsigned int state; - wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); + unsigned int state; + wait_queue_t *curr; + task_t *p; - CHECK_MAGIC(curr->__magic); + list_for_each(tmp, &q->task_list) { + curr = list_entry(tmp, wait_queue_t, task_list); p = curr->task; state = p->state; - if (state & mode) { - WQ_NOTE_WAKER(curr); - if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) + if ((state & mode) && try_to_wake_up(p, sync) && + ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)) break; - } } } -void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr) +/** + * __wake_up - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + */ +void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { - if (q) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr, 0); - wq_read_unlock_irqrestore(&q->lock, flags); - } + unsigned long flags; + + if (unlikely(!q)) + return; + + spin_lock_irqsave(&q->lock, flags); + __wake_up_common(q, mode, nr_exclusive, 0); + spin_unlock_irqrestore(&q->lock, flags); } -void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr) +#if CONFIG_SMP + +/** + * __wake_up - sync- wake up threads blocked on a waitqueue. 
+ * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + * + * The sync wakeup differs that the waker knows that it will schedule + * away soon, so while the target thread will be woken up, it will not + * be migrated to another CPU - ie. the two threads are 'synchronized' + * with each other. This can prevent needless bouncing between CPUs. + */ +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) { - if (q) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - __wake_up_common(q, mode, nr, 1); - wq_read_unlock_irqrestore(&q->lock, flags); - } + unsigned long flags; + + if (unlikely(!q)) + return; + + spin_lock_irqsave(&q->lock, flags); + if (likely(nr_exclusive)) + __wake_up_common(q, mode, nr_exclusive, 1); + else + __wake_up_common(q, mode, nr_exclusive, 0); + spin_unlock_irqrestore(&q->lock, flags); } +#endif + void complete(struct completion *x) { unsigned long flags; @@ -790,15 +1082,15 @@ wait_queue_t wait; \ init_waitqueue_entry(&wait, current); -#define SLEEP_ON_HEAD \ - wq_write_lock_irqsave(&q->lock,flags); \ +#define SLEEP_ON_HEAD \ + spin_lock_irqsave(&q->lock,flags); \ __add_wait_queue(q, &wait); \ - wq_write_unlock(&q->lock); + spin_unlock(&q->lock); #define SLEEP_ON_TAIL \ - wq_write_lock_irq(&q->lock); \ + spin_lock_irq(&q->lock); \ __remove_wait_queue(q, &wait); \ - wq_write_unlock_irqrestore(&q->lock,flags); + spin_unlock_irqrestore(&q->lock, flags); void interruptible_sleep_on(wait_queue_head_t *q) { @@ -850,17 +1142,53 @@ void scheduling_functions_end_here(void) { } +void set_user_nice(task_t *p, long nice) +{ + unsigned long flags; + prio_array_t *array; + runqueue_t *rq; + + if (TASK_NICE(p) == nice || nice < -20 || nice > 19) + return; + /* + * We have to be careful, if called from sys_setpriority(), + * the task might be in the middle of scheduling on another CPU. + */ + rq = task_rq_lock(p, &flags); + if (rt_task(p)) { + p->static_prio = NICE_TO_PRIO(nice); + goto out_unlock; + } + array = p->array; + if (array) + dequeue_task(p, array); + p->static_prio = NICE_TO_PRIO(nice); + p->prio = NICE_TO_PRIO(nice); + if (array) { + enqueue_task(p, array); + /* + * If the task is running and lowered its priority, + * or increased its priority then reschedule its CPU: + */ + if ((NICE_TO_PRIO(nice) < p->static_prio) || (p == rq->curr)) + resched_task(rq->curr); + } +out_unlock: + task_rq_unlock(rq, &flags); +} + #ifndef __alpha__ -/* - * This has been replaced by sys_setpriority. Maybe it should be - * moved into the arch dependent tree for those ports that require - * it for backward compatibility? +/** + * sys_nice - change the priority of the current process. + * @increment: priority increment + * + * sys_setpriority is a more generic, but much slower function that + * does similar things. */ - asmlinkage long sys_nice(int increment) { - long newprio; + long nice; /* * Setpriority might change our priority at the same moment. @@ -876,34 +1204,69 @@ if (increment > 40) increment = 40; - newprio = current->nice + increment; - if (newprio < -20) - newprio = -20; - if (newprio > 19) - newprio = 19; - current->nice = newprio; + nice = PRIO_TO_NICE(current->static_prio) + increment; + if (nice < -20) + nice = -20; + if (nice > 19) + nice = 19; + set_user_nice(current, nice); return 0; } #endif -static inline struct task_struct *find_process_by_pid(pid_t pid) +/** + * task_prio - return the priority value of a given task. + * @p: the task in question. 
+ * + * This is the priority value as seen by users in /proc. + * RT tasks are offset by -200. Normal tasks are centered + * around 0, value goes from -16 to +15. + */ +int task_prio(task_t *p) +{ + return p->prio - MAX_USER_RT_PRIO; +} + +/** + * task_nice - return the nice value of a given task. + * @p: the task in question. + */ +int task_nice(task_t *p) +{ + return TASK_NICE(p); +} + +/** + * idle_cpu - is a given cpu idle currently? + * @cpu: the processor in question. + */ +inline int idle_cpu(int cpu) { - struct task_struct *tsk = current; + return cpu_curr(cpu) == cpu_rq(cpu)->idle; +} - if (pid) - tsk = find_task_by_pid(pid); - return tsk; +/** + * find_process_by_pid - find a process with a matching PID value. + * @pid: the pid in question. + */ +static inline task_t *find_process_by_pid(pid_t pid) +{ + return pid ? find_task_by_pid(pid) : current; } -static int setscheduler(pid_t pid, int policy, - struct sched_param *param) +/* + * setscheduler - change the scheduling policy and/or RT priority of a thread. + */ +static int setscheduler(pid_t pid, int policy, struct sched_param *param) { struct sched_param lp; - struct task_struct *p; - int retval; + int retval = -EINVAL; + prio_array_t *array; + unsigned long flags; + runqueue_t *rq; + task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -915,14 +1278,19 @@ * We play safe to avoid deadlocks. */ read_lock_irq(&tasklist_lock); - spin_lock(&runqueue_lock); p = find_process_by_pid(pid); retval = -ESRCH; if (!p) - goto out_unlock; - + goto out_unlock_tasklist; + + /* + * To be able to change p->policy safely, the apropriate + * runqueue lock must be held. + */ + rq = task_rq_lock(p, &flags); + if (policy < 0) policy = p->policy; else { @@ -931,56 +1299,78 @@ policy != SCHED_OTHER) goto out_unlock; } - + /* - * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid - * priority for SCHED_OTHER is 0. + * Valid priorities for SCHED_FIFO and SCHED_RR are + * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_OTHER is 0. */ retval = -EINVAL; - if (lp.sched_priority < 0 || lp.sched_priority > 99) + if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1) goto out_unlock; if ((policy == SCHED_OTHER) != (lp.sched_priority == 0)) goto out_unlock; retval = -EPERM; - if ((policy == SCHED_FIFO || policy == SCHED_RR) && + if ((policy == SCHED_FIFO || policy == SCHED_RR) && !capable(CAP_SYS_NICE)) goto out_unlock; if ((current->euid != p->euid) && (current->euid != p->uid) && !capable(CAP_SYS_NICE)) goto out_unlock; + array = p->array; + if (array) + deactivate_task(p, task_rq(p)); retval = 0; p->policy = policy; p->rt_priority = lp.sched_priority; - - current->need_resched = 1; + if (policy != SCHED_OTHER) + p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority; + else + p->prio = p->static_prio; + if (array) + activate_task(p, task_rq(p)); out_unlock: - spin_unlock(&runqueue_lock); + task_rq_unlock(rq, &flags); +out_unlock_tasklist: read_unlock_irq(&tasklist_lock); out_nounlock: return retval; } -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, +/** + * sys_sched_setscheduler - set/change the scheduler policy and RT priority + * @pid: the pid in question. + * @policy: new policy + * @param: structure containing the new RT priority. + */ +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, struct sched_param *param) { return setscheduler(pid, policy, param); } +/** + * sys_sched_setparam - set/change the RT priority of a thread + * @pid: the pid in question. 
+ * @param: structure containing the new RT priority. + */ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param) { return setscheduler(pid, -1, param); } +/** + * sys_sched_getscheduler - get the policy (scheduling class) of a thread + * @pid: the pid in question. + */ asmlinkage long sys_sched_getscheduler(pid_t pid) { - struct task_struct *p; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (pid < 0) goto out_nounlock; @@ -988,20 +1378,24 @@ read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) - retval = p->policy & ~SCHED_YIELD; + retval = p->policy; read_unlock(&tasklist_lock); out_nounlock: return retval; } +/** + * sys_sched_getparam - get the RT priority of a thread + * @pid: the pid in question. + * @param: sched_param structure containing the RT priority. + */ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param) { - struct task_struct *p; struct sched_param lp; - int retval; + int retval = -EINVAL; + task_t *p; - retval = -EINVAL; if (!param || pid < 0) goto out_nounlock; @@ -1026,44 +1420,125 @@ return retval; } -asmlinkage long sys_sched_yield(void) +/** + * sys_sched_setaffinity - set the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to the new cpu mask + */ +asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) { + unsigned long new_mask; + int retval; + task_t *p; + + if (len < sizeof(new_mask)) + return -EINVAL; + + if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) + return -EFAULT; + + new_mask &= cpu_online_map; + if (!new_mask) + return -EINVAL; + + read_lock(&tasklist_lock); + + p = find_process_by_pid(pid); + if (!p) { + read_unlock(&tasklist_lock); + return -ESRCH; + } + /* - * Trick. sched_yield() first counts the number of truly - * 'pending' runnable processes, then returns if it's - * only the current processes. (This test does not have - * to be atomic.) In threaded applications this optimization - * gets triggered quite often. + * It is not safe to call set_cpus_allowed with the + * tasklist_lock held. We will bump the task_struct's + * usage count and then drop tasklist_lock. */ + get_task_struct(p); + read_unlock(&tasklist_lock); - int nr_pending = nr_running; + retval = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_NICE)) + goto out_unlock; -#if CONFIG_SMP - int i; + retval = 0; + set_cpus_allowed(p, new_mask); - // Subtract non-idle processes running on other CPUs. - for (i = 0; i < smp_num_cpus; i++) { - int cpu = cpu_logical_map(i); - if (aligned_data[cpu].schedule_data.curr != idle_task(cpu)) - nr_pending--; - } -#else - // on UP this process is on the runqueue as well - nr_pending--; -#endif - if (nr_pending) { - /* - * This process can only be rescheduled by us, - * so this is safe without any locking. 
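From userspace these entry points are reached through the usual libc wrappers; a small example, assuming root or CAP_SYS_NICE as required by the permission checks in setscheduler(): switch the calling process to SCHED_RR at priority 10 and read the policy and RT priority back.

#include <sched.h>
#include <stdio.h>

int main(void)
{
    struct sched_param sp = { .sched_priority = 10 };

    if (sched_setscheduler(0, SCHED_RR, &sp) != 0) {
        perror("sched_setscheduler");
        return 1;
    }
    printf("policy=%d (SCHED_RR=%d)\n", sched_getscheduler(0), SCHED_RR);

    sp.sched_priority = 0;
    if (sched_getparam(0, &sp) == 0)
        printf("rt priority=%d\n", sp.sched_priority);
    return 0;
}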
- */ - if (current->policy == SCHED_OTHER) - current->policy |= SCHED_YIELD; - current->need_resched = 1; - - spin_lock_irq(&runqueue_lock); - move_last_runqueue(current); - spin_unlock_irq(&runqueue_lock); +out_unlock: + free_task_struct(p); + return retval; +} + +/** + * sys_sched_getaffinity - get the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to hold the current cpu mask + */ +asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) +{ + unsigned int real_len; + unsigned long mask; + int retval; + task_t *p; + + real_len = sizeof(mask); + if (len < real_len) + return -EINVAL; + + read_lock(&tasklist_lock); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = 0; + mask = p->cpus_allowed & cpu_online_map; + +out_unlock: + read_unlock(&tasklist_lock); + if (retval) + return retval; + if (copy_to_user(user_mask_ptr, &mask, real_len)) + return -EFAULT; + return real_len; +} + +/** + * sys_sched_yield - yield the current processor to other threads. + * + * this function yields the current CPU by moving the calling thread + * to the expired array. If there are no other threads running on this + * CPU then this function will return. + */ +asmlinkage long sys_sched_yield(void) +{ + runqueue_t *rq = this_rq_lock(); + prio_array_t *array = current->array; + + /* + * We implement yielding by moving the task into the expired + * queue. + * + * (special rule: RT tasks will just roundrobin in the active + * array.) + */ + if (likely(!rt_task(current))) { + dequeue_task(current, array); + enqueue_task(current, rq->expired); + } else { + list_del(¤t->run_list); + list_add_tail(¤t->run_list, array->queue + current->prio); } + spin_unlock(&rq->lock); + + schedule(); + return 0; } @@ -1077,15 +1552,29 @@ { set_current_state(TASK_RUNNING); sys_sched_yield(); - schedule(); } +/** + * _cond_resched - conditionally reschedule + * + * Helper function called if cond_resched inline decides we have + * exceeded the timeslice at this point. We give up the processor + * having made sure we will get it back + */ + void __cond_resched(void) { set_current_state(TASK_RUNNING); schedule(); } +/** + * sys_sched_get_priority_max - return maximum RT priority. + * @policy: scheduling class. + * + * this syscall returns the maximum rt_priority that can be used + * by a given scheduling class. + */ asmlinkage long sys_sched_get_priority_max(int policy) { int ret = -EINVAL; @@ -1093,7 +1582,7 @@ switch (policy) { case SCHED_FIFO: case SCHED_RR: - ret = 99; + ret = MAX_USER_RT_PRIO-1; break; case SCHED_OTHER: ret = 0; @@ -1102,6 +1591,13 @@ return ret; } +/** + * sys_sched_get_priority_min - return minimum RT priority. + * @policy: scheduling class. + * + * this syscall returns the minimum rt_priority that can be used + * by a given scheduling class. + */ asmlinkage long sys_sched_get_priority_min(int policy) { int ret = -EINVAL; @@ -1117,11 +1613,19 @@ return ret; } +/** + * sys_sched_rr_get_interval - return the default timeslice of a process. + * @pid: pid of the process. + * @interval: userspace pointer to the timeslice value. + * + * this syscall writes the default timeslice value of a given process + * into the user-space timespec buffer. A value of '0' means infinity. 
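The priority-range and timeslice syscalls above, exercised from userspace via the standard wrappers: SCHED_FIFO/SCHED_RR report the 1..MAX_USER_RT_PRIO-1 range, SCHED_OTHER reports 0, and sched_rr_get_interval() reports the default timeslice computed by task_timeslice().

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    printf("SCHED_FIFO:  %d..%d\n",
           sched_get_priority_min(SCHED_FIFO),
           sched_get_priority_max(SCHED_FIFO));
    printf("SCHED_OTHER: %d..%d\n",
           sched_get_priority_min(SCHED_OTHER),
           sched_get_priority_max(SCHED_OTHER));

    if (sched_rr_get_interval(0, &ts) == 0)
        printf("default timeslice: %ld.%09ld s\n",
               (long)ts.tv_sec, (long)ts.tv_nsec);
    else
        perror("sched_rr_get_interval");
    return 0;
}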
+ */ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval) { - struct timespec t; - struct task_struct *p; int retval = -EINVAL; + struct timespec t; + task_t *p; if (pid < 0) goto out_nounlock; @@ -1130,8 +1634,8 @@ read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (p) - jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice), - &t); + jiffies_to_timespec(p->policy & SCHED_FIFO ? + 0 : task_timeslice(p), &t); read_unlock(&tasklist_lock); if (p) retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; @@ -1139,14 +1643,14 @@ return retval; } -static void show_task(struct task_struct * p) +static void show_task(task_t * p) { unsigned long free = 0; int state; static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" }; printk("%-13.13s ", p->comm); - state = p->state ? ffz(~p->state) + 1 : 0; + state = p->state ? __ffs(p->state) + 1 : 0; if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *)) printk(stat_nam[state]); else @@ -1187,7 +1691,7 @@ printk(" (NOTLB)\n"); { - extern void show_trace_task(struct task_struct *tsk); + extern void show_trace_task(task_t *tsk); show_trace_task(p); } } @@ -1209,7 +1713,7 @@ void show_state(void) { - struct task_struct *p; + task_t *p; #if (BITS_PER_LONG == 32) printk("\n" @@ -1232,128 +1736,241 @@ read_unlock(&tasklist_lock); } -/** - * reparent_to_init() - Reparent the calling kernel thread to the init task. - * - * If a kernel thread is launched as a result of a system call, or if - * it ever exits, it should generally reparent itself to init so that - * it is correctly cleaned up on exit. - * - * The various task state such as scheduling policy and priority may have - * been inherited fro a user process, so we reset them to sane values here. - * - * NOTE that reparent_to_init() gives the caller full capabilities. - */ -void reparent_to_init(void) +void __init init_idle(task_t *idle, int cpu) { - struct task_struct *this_task = current; + runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle)); + unsigned long flags; + + __save_flags(flags); + __cli(); + double_rq_lock(idle_rq, rq); + + idle_rq->curr = idle_rq->idle = idle; + deactivate_task(idle, rq); + idle->array = NULL; + idle->prio = MAX_PRIO; + idle->state = TASK_RUNNING; + set_task_cpu(idle, cpu); + double_rq_unlock(idle_rq, rq); + set_tsk_need_resched(idle); + __restore_flags(flags); +} - write_lock_irq(&tasklist_lock); +extern void init_timervecs(void); +extern void timer_bh(void); +extern void tqueue_bh(void); +extern void immediate_bh(void); - /* Reparent to init */ - REMOVE_LINKS(this_task); - this_task->p_pptr = child_reaper; - this_task->p_opptr = child_reaper; - SET_LINKS(this_task); +void __init sched_init(void) +{ + runqueue_t *rq; + int i, j, k; - /* Set the exit signal to SIGCHLD so we signal init on exit */ - this_task->exit_signal = SIGCHLD; + for (i = 0; i < NR_CPUS; i++) { + prio_array_t *array; - /* We also take the runqueue_lock while altering task fields - * which affect scheduling decisions */ - spin_lock(&runqueue_lock); + rq = cpu_rq(i); + rq->active = rq->arrays; + rq->expired = rq->arrays + 1; + spin_lock_init(&rq->lock); + INIT_LIST_HEAD(&rq->migration_queue); + + for (j = 0; j < 2; j++) { + array = rq->arrays + j; + for (k = 0; k < MAX_PRIO; k++) { + INIT_LIST_HEAD(array->queue + k); + __clear_bit(k, array->bitmap); + } + // delimiter for bitsearch + __set_bit(MAX_PRIO, array->bitmap); + } + } + /* + * We have to do a little magic to get the first + * process right in SMP mode. 
+ */ + rq = this_rq(); + rq->curr = current; + rq->idle = current; + wake_up_process(current); - this_task->ptrace = 0; - this_task->nice = DEF_NICE; - this_task->policy = SCHED_OTHER; - /* cpus_allowed? */ - /* rt_priority? */ - /* signals? */ - this_task->cap_effective = CAP_INIT_EFF_SET; - this_task->cap_inheritable = CAP_INIT_INH_SET; - this_task->cap_permitted = CAP_FULL_SET; - this_task->keep_capabilities = 0; - memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim))); - this_task->user = INIT_USER; + init_timervecs(); + init_bh(TIMER_BH, timer_bh); + init_bh(TQUEUE_BH, tqueue_bh); + init_bh(IMMEDIATE_BH, immediate_bh); - spin_unlock(&runqueue_lock); - write_unlock_irq(&tasklist_lock); + /* + * The boot idle thread does lazy MMU switching as well: + */ + atomic_inc(&init_mm.mm_count); + enter_lazy_tlb(&init_mm, current, smp_processor_id()); } +#if CONFIG_SMP + /* - * Put all the gunge required to become a kernel thread without - * attached user resources in one place where it belongs. + * This is how migration works: + * + * 1) we queue a migration_req_t structure in the source CPU's + * runqueue and wake up that CPU's migration thread. + * 2) we down() the locked semaphore => thread blocks. + * 3) migration thread wakes up (implicitly it forces the migrated + * thread off the CPU) + * 4) it gets the migration request and checks whether the migrated + * task is still in the wrong runqueue. + * 5) if it's in the wrong runqueue then the migration thread removes + * it and puts it into the right queue. + * 6) migration thread up()s the semaphore. + * 7) we wake up and the migration is done. */ -void daemonize(void) +typedef struct { + struct list_head list; + task_t *task; + struct semaphore sem; +} migration_req_t; + +/* + * Change a given task's CPU affinity. Migrate the process to a + * proper CPU and schedule it away if the CPU it's executing on + * is removed from the allowed bitmask. + * + * NOTE: the caller must have a valid reference to the task, the + * task must not exit() & deallocate itself prematurely. The + * call is not atomic; no spinlocks may be held. + */ +void set_cpus_allowed(task_t *p, unsigned long new_mask) { - struct fs_struct *fs; + unsigned long flags; + migration_req_t req; + runqueue_t *rq; + new_mask &= cpu_online_map; + if (!new_mask) + BUG(); + rq = task_rq_lock(p, &flags); + p->cpus_allowed = new_mask; /* - * If we were started as result of loading a module, close all of the - * user space pages. We don't need them, and if we didn't close them - * they would be locked into memory. + * Can the task run on the task's current CPU? If not then + * migrate the process off to a proper CPU. */ - exit_mm(current); - - current->session = 1; - current->pgrp = 1; - current->tty = NULL; - - /* Become as one with the init task */ + if (new_mask & (1UL << task_cpu(p))) { + task_rq_unlock(rq, &flags); + goto out; + } + /* + * If the task is not on a runqueue (and not running), then + * it is sufficient to simply update the task's cpu field. 
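The request/semaphore handshake described in steps 1-7 above, modeled as a self-contained userspace program (build with -pthread): the caller queues a request carrying a locked semaphore, wakes the worker, and sleeps until the worker posts it, just as set_cpus_allowed() sleeps in down(&req.sem) until the migration thread calls up(). The single-slot queue and all names here are illustrative.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct migration_req_model {
    int task_id;
    int dest_cpu;
    sem_t done;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static struct migration_req_model *pending;

static void *migration_worker(void *unused)
{
    struct migration_req_model *req;

    (void)unused;
    pthread_mutex_lock(&queue_lock);
    while (!pending)                    /* steps 3/4: wake up, fetch request */
        pthread_cond_wait(&queue_cond, &queue_lock);
    req = pending;
    pending = NULL;
    pthread_mutex_unlock(&queue_lock);

    printf("worker: moving task %d to cpu %d\n", req->task_id, req->dest_cpu);
    sem_post(&req->done);               /* step 6: up() the semaphore */
    return NULL;
}

int main(void)
{
    pthread_t worker;
    struct migration_req_model req = { .task_id = 42, .dest_cpu = 1 };

    sem_init(&req.done, 0, 0);          /* starts locked, like init_MUTEX_LOCKED */
    pthread_create(&worker, NULL, migration_worker, NULL);

    pthread_mutex_lock(&queue_lock);    /* step 1: queue the request */
    pending = &req;
    pthread_cond_signal(&queue_cond);
    pthread_mutex_unlock(&queue_lock);

    sem_wait(&req.done);                /* steps 2/7: block until the move is done */
    printf("caller: migration complete\n");

    pthread_join(worker, NULL);
    sem_destroy(&req.done);
    return 0;
}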
+ */ + if (!p->array && (p != rq->curr)) { + set_task_cpu(p, __ffs(p->cpus_allowed)); + task_rq_unlock(rq, &flags); + goto out; + } + init_MUTEX_LOCKED(&req.sem); + req.task = p; + list_add(&req.list, &rq->migration_queue); + task_rq_unlock(rq, &flags); + wake_up_process(rq->migration_thread); - exit_fs(current); /* current->fs->count--; */ - fs = init_task.fs; - current->fs = fs; - atomic_inc(&fs->count); - exit_files(current); - current->files = init_task.files; - atomic_inc(¤t->files->count); + down(&req.sem); +out: + ; } -extern unsigned long wait_init_idle; - -void __init init_idle(void) +/* + * migration_thread - this is a highprio system thread that performs + * thread migration by 'pulling' threads into the target runqueue. + */ +static int migration_thread(void * bind_cpu) { - struct schedule_data * sched_data; - sched_data = &aligned_data[smp_processor_id()].schedule_data; + struct sched_param param = { sched_priority: MAX_RT_PRIO-1 }; + int cpu = cpu_logical_map((int) (long) bind_cpu); + runqueue_t *rq; + int ret; + + daemonize(); + sigfillset(¤t->blocked); + set_fs(KERNEL_DS); + /* + * The first migration thread is started on CPU #0. This one can + * migrate the other migration threads to their destination CPUs. + */ + if (cpu != 0) { + while (!cpu_rq(cpu_logical_map(0))->migration_thread) + yield(); + set_cpus_allowed(current, 1UL << cpu); + } + printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id()); + ret = setscheduler(0, SCHED_FIFO, ¶m); + + rq = this_rq(); + rq->migration_thread = current; + + sprintf(current->comm, "migration_CPU%d", smp_processor_id()); + + for (;;) { + runqueue_t *rq_src, *rq_dest; + struct list_head *head; + int cpu_src, cpu_dest; + migration_req_t *req; + unsigned long flags; + task_t *p; + + spin_lock_irqsave(&rq->lock, flags); + head = &rq->migration_queue; + current->state = TASK_INTERRUPTIBLE; + if (list_empty(head)) { + spin_unlock_irqrestore(&rq->lock, flags); + schedule(); + continue; + } + req = list_entry(head->next, migration_req_t, list); + list_del_init(head->next); + spin_unlock_irqrestore(&rq->lock, flags); + + p = req->task; + cpu_dest = __ffs(p->cpus_allowed); + rq_dest = cpu_rq(cpu_dest); +repeat: + cpu_src = task_cpu(p); + rq_src = cpu_rq(cpu_src); + + local_irq_save(flags); + double_rq_lock(rq_src, rq_dest); + if (task_cpu(p) != cpu_src) { + double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); + goto repeat; + } + if (rq_src == rq) { + set_task_cpu(p, cpu_dest); + if (p->array) { + deactivate_task(p, rq_src); + activate_task(p, rq_dest); + } + } + double_rq_unlock(rq_src, rq_dest); + local_irq_restore(flags); - if (current != &init_task && task_on_runqueue(current)) { - printk("UGH! (%d:%d) was on the runqueue, removing.\n", - smp_processor_id(), current->pid); - del_from_runqueue(current); + up(&req->sem); } - sched_data->curr = current; - sched_data->last_schedule = get_cycles(); - clear_bit(current->processor, &wait_init_idle); } -extern void init_timervecs (void); - -void __init sched_init(void) +void __init migration_init(void) { - /* - * We have to do a little magic to get the first - * process right in SMP mode. 
- */ - int cpu = smp_processor_id(); - int nr; - - init_task.processor = cpu; - - for(nr = 0; nr < PIDHASH_SZ; nr++) - pidhash[nr] = NULL; - - init_timervecs(); + int cpu; - init_bh(TIMER_BH, timer_bh); - init_bh(TQUEUE_BH, tqueue_bh); - init_bh(IMMEDIATE_BH, immediate_bh); + current->cpus_allowed = 1UL << cpu_logical_map(0); + for (cpu = 0; cpu < smp_num_cpus; cpu++) + if (kernel_thread(migration_thread, (void *) (long) cpu, + CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0) + BUG(); + current->cpus_allowed = -1L; - /* - * The boot idle thread does lazy MMU switching as well: - */ - atomic_inc(&init_mm.mm_count); - enter_lazy_tlb(&init_mm, current, cpu); + for (cpu = 0; cpu < smp_num_cpus; cpu++) + while (!cpu_rq(cpu_logical_map(cpu))->migration_thread) + schedule_timeout(2); } +#endif diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/signal.c linux.21pre4-ac2/kernel/signal.c --- linux.21pre4/kernel/signal.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/kernel/signal.c 2003-01-06 19:37:52.000000000 +0000 @@ -492,7 +492,7 @@ * No need to set need_resched since signal event passing * goes through ->blocked */ -static inline void signal_wake_up(struct task_struct *t) +inline void signal_wake_up(struct task_struct *t) { t->sigpending = 1; @@ -507,12 +507,9 @@ * process of changing - but no harm is done by that * other than doing an extra (lightweight) IPI interrupt. */ - spin_lock(&runqueue_lock); - if (task_has_cpu(t) && t->processor != smp_processor_id()) - smp_send_reschedule(t->processor); - spin_unlock(&runqueue_lock); -#endif /* CONFIG_SMP */ - + if ((t->state == TASK_RUNNING) && (t->cpu != cpu())) + kick_if_running(t); +#endif if (t->state & TASK_INTERRUPTIBLE) { wake_up_process(t); return; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/softirq.c linux.21pre4-ac2/kernel/softirq.c --- linux.21pre4/kernel/softirq.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/softirq.c 2003-01-06 19:28:13.000000000 +0000 @@ -364,13 +364,13 @@ int cpu = cpu_logical_map(bind_cpu); daemonize(); - current->nice = 19; + set_user_nice(current, 19); sigfillset(¤t->blocked); /* Migrate to the right CPU */ - current->cpus_allowed = 1UL << cpu; - while (smp_processor_id() != cpu) - schedule(); + set_cpus_allowed(current, 1UL << cpu); + if (cpu() != cpu) + BUG(); sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu); @@ -395,7 +395,7 @@ } } -static __init int spawn_ksoftirqd(void) +__init int spawn_ksoftirqd(void) { int cpu; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/sys.c linux.21pre4-ac2/kernel/sys.c --- linux.21pre4/kernel/sys.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/kernel/sys.c 2003-01-30 16:55:56.000000000 +0000 @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ +#include #include #include #include @@ -239,10 +240,10 @@ } if (error == -ESRCH) error = 0; - if (niceval < p->nice && !capable(CAP_SYS_NICE)) + if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) error = -EACCES; else - p->nice = niceval; + set_user_nice(p, niceval); } read_unlock(&tasklist_lock); @@ -268,7 +269,7 @@ long niceval; if (!proc_sel(p, which, who)) continue; - niceval = 20 - p->nice; + niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; } @@ -509,9 +510,10 @@ } } -static int set_user(uid_t new_ruid, int dumpclear) +int set_user(uid_t new_ruid, int dumpclear) { struct user_struct *new_user, *old_user; + struct task_struct *this_task = current; /* What if a 
process setreuid()'s and this brings the * new uid over his NPROC rlimit? We can check this now @@ -521,17 +523,16 @@ new_user = alloc_uid(new_ruid); if (!new_user) return -EAGAIN; - old_user = current->user; - atomic_dec(&old_user->processes); + old_user = this_task->user; atomic_inc(&new_user->processes); + atomic_dec(&old_user->processes); - if(dumpclear) - { - current->mm->dumpable = 0; + if (dumpclear && this_task->mm) { + this_task->mm->dumpable = 0; wmb(); } - current->uid = new_ruid; - current->user = new_user; + this_task->uid = new_ruid; + this_task->user = new_user; free_uid(old_user); return 0; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/sysctl.c linux.21pre4-ac2/kernel/sysctl.c --- linux.21pre4/kernel/sysctl.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/kernel/sysctl.c 2003-01-06 23:26:26.000000000 +0000 @@ -80,6 +80,11 @@ extern int stop_a_enabled; #endif +#ifdef __hppa__ +extern int pwrsw_enabled; +extern int unaligned_enabled; +#endif + #ifdef CONFIG_ARCH_S390 #ifdef CONFIG_MATHEMU extern int sysctl_ieee_emulation_warnings; @@ -188,6 +193,12 @@ {KERN_SPARC_STOP_A, "stop-a", &stop_a_enabled, sizeof (int), 0644, NULL, &proc_dointvec}, #endif +#ifdef __hppa__ + {KERN_HPPA_PWRSW, "soft-power", &pwrsw_enabled, sizeof (int), + 0644, NULL, &proc_dointvec}, + {KERN_HPPA_UNALIGNED, "unaligned-trap", &unaligned_enabled, sizeof (int), + 0644, NULL, &proc_dointvec}, +#endif #ifdef CONFIG_PPC32 {KERN_PPC_ZEROPAGED, "zero-paged", &zero_paged_on, sizeof(int), 0644, NULL, &proc_dointvec}, @@ -296,8 +307,6 @@ 0444, NULL, &proc_dointvec}, {FS_MAXFILE, "file-max", &files_stat.max_files, sizeof(int), 0644, NULL, &proc_dointvec}, - {FS_NRDQUOT, "dquot-nr", &nr_dquots, 2*sizeof(int), - 0444, NULL, &proc_dointvec}, {FS_DENTRY, "dentry-state", &dentry_stat, 6*sizeof(int), 0444, NULL, &proc_dointvec}, {FS_OVERFLOWUID, "overflowuid", &fs_overflowuid, sizeof(int), 0644, NULL, diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/kernel/timer.c linux.21pre4-ac2/kernel/timer.c --- linux.21pre4/kernel/timer.c 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/kernel/timer.c 2003-01-06 19:47:54.000000000 +0000 @@ -25,6 +25,8 @@ #include +struct kernel_stat kstat; + /* * Timekeeping variables */ @@ -598,25 +600,7 @@ int cpu = smp_processor_id(), system = user_tick ^ 1; update_one_process(p, user_tick, system, cpu); - if (p->pid) { - if (--p->counter <= 0) { - p->counter = 0; - /* - * SCHED_FIFO is priority preemption, so this is - * not the place to decide whether to reschedule a - * SCHED_FIFO task or not - Bhavesh Davda - */ - if (p->policy != SCHED_FIFO) { - p->need_resched = 1; - } - } - if (p->nice > 0) - kstat.per_cpu_nice[cpu] += user_tick; - else - kstat.per_cpu_user[cpu] += user_tick; - kstat.per_cpu_system[cpu] += system; - } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1) - kstat.per_cpu_system[cpu] += system; + scheduler_tick(user_tick, system); } /* @@ -624,17 +608,7 @@ */ static unsigned long count_active_tasks(void) { - struct task_struct *p; - unsigned long nr = 0; - - read_lock(&tasklist_lock); - for_each_task(p) { - if ((p->state == TASK_RUNNING || - (p->state & TASK_UNINTERRUPTIBLE))) - nr += FIXED_1; - } - read_unlock(&tasklist_lock); - return nr; + return (nr_running() + nr_uninterruptible()) * FIXED_1; } /* @@ -827,6 +801,89 @@ #endif +static void process_timeout(unsigned long __data) +{ + wake_up_process((task_t *)__data); +} + +/** + * schedule_timeout - sleep until timeout + * @timeout: 
timeout value in jiffies + * + * Make the current task sleep until @timeout jiffies have + * elapsed. The routine will return immediately unless + * the current task state has been set (see set_current_state()). + * + * You can set the task state as follows - + * + * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to + * pass before the routine returns. The routine will return 0 + * + * %TASK_INTERRUPTIBLE - the routine may return early if a signal is + * delivered to the current task. In this case the remaining time + * in jiffies will be returned, or 0 if the timer expired in time + * + * The current task state is guaranteed to be TASK_RUNNING when this + * routine returns. + * + * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule + * the CPU away without a bound on the timeout. In this case the return + * value will be %MAX_SCHEDULE_TIMEOUT. + * + * In all cases the return value is guaranteed to be non-negative. + */ +signed long schedule_timeout(signed long timeout) +{ + struct timer_list timer; + unsigned long expire; + + switch (timeout) + { + case MAX_SCHEDULE_TIMEOUT: + /* + * These two special cases are useful to be comfortable + * in the caller. Nothing more. We could take + * MAX_SCHEDULE_TIMEOUT from one of the negative value + * but I' d like to return a valid offset (>=0) to allow + * the caller to do everything it want with the retval. + */ + schedule(); + goto out; + default: + /* + * Another bit of PARANOID. Note that the retval will be + * 0 since no piece of kernel is supposed to do a check + * for a negative retval of schedule_timeout() (since it + * should never happens anyway). You just have the printk() + * that will tell you if something is gone wrong and where. + */ + if (timeout < 0) + { + printk(KERN_ERR "schedule_timeout: wrong timeout " + "value %lx from %p\n", timeout, + __builtin_return_address(0)); + current->state = TASK_RUNNING; + goto out; + } + } + + expire = timeout + jiffies; + + init_timer(&timer); + timer.expires = expire; + timer.data = (unsigned long) current; + timer.function = process_timeout; + + add_timer(&timer); + schedule(); + del_timer_sync(&timer); + + timeout = expire - jiffies; + + out: + return timeout < 0 ? 0 : timeout; +} + /* Thread ID - the internal kernel "pid" */ asmlinkage long sys_gettid(void) { @@ -873,4 +930,3 @@ } return 0; } - diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/MAINTAINERS linux.21pre4-ac2/MAINTAINERS --- linux.21pre4/MAINTAINERS 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/MAINTAINERS 2003-01-29 17:26:26.000000000 +0000 @@ -440,6 +440,13 @@ W: http://www.debian.org/~dz/i8k/ S: Maintained +DEVICE-MAPPER +P: Joe Thornber +M: thornber@sistina.com +L: dm@uk.sistina.com +W: http://www.sistina.com/lvm +S: Maintained + DEVICE NUMBER REGISTRY P: H. 
Peter Anvin M: hpa@zytor.com @@ -1595,6 +1602,12 @@ L: linux-video@atrey.karlin.mff.cuni.cz S: Maintained +SYSKONNECT DRIVER +P: Mirko Lindner +M: mlindner@syskonnect.de +W: http://www.syskonnect.com +S: Supported + SYSV FILESYSTEM P: Christoph Hellwig M: hch@infradead.org diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Makefile linux.21pre4-ac2/Makefile --- linux.21pre4/Makefile 2003-01-29 17:07:44.000000000 +0000 +++ linux.21pre4-ac2/Makefile 2003-01-29 17:26:26.000000000 +0000 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 SUBLEVEL = 21 -EXTRAVERSION = -pre4 +EXTRAVERSION = -pre4-ac1 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) @@ -40,10 +40,13 @@ MODFLAGS = -DMODULE CFLAGS_KERNEL = PERL = perl +AWK = awk +RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \ + else echo rpm; fi) export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION KERNELRELEASE ARCH \ CONFIG_SHELL TOPDIR HPATH HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \ - CPP AR NM STRIP OBJCOPY OBJDUMP MAKE MAKEFILES GENKSYMS MODFLAGS PERL + CPP AR NM STRIP OBJCOPY OBJDUMP MAKE MAKEFILES GENKSYMS MODFLAGS PERL AWK all: do-it-all @@ -138,6 +141,7 @@ drivers/block/block.o \ drivers/misc/misc.o \ drivers/net/net.o + DRIVERS-$(CONFIG_AGP) += drivers/char/agp/agp.o DRIVERS-$(CONFIG_DRM_NEW) += drivers/char/drm/drm.o DRIVERS-$(CONFIG_DRM_OLD) += drivers/char/drm-4.0/drm.o @@ -171,7 +175,7 @@ DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a DRIVERS-$(CONFIG_PPC32) += drivers/macintosh/macintosh.o DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o -DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o +DRIVERS-$(CONFIG_PNP) += drivers/pnp/pnp.o DRIVERS-$(CONFIG_SGI_IP22) += drivers/sgi/sgi.a DRIVERS-$(CONFIG_VT) += drivers/video/video.o DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a @@ -199,6 +203,7 @@ kernel/ksyms.lst include/linux/compile.h \ vmlinux System.map \ .tmp* \ + scripts/mkconfigs kernel/configs.c kernel/configs.o \ drivers/char/consolemap_deftbl.c drivers/video/promcon_tbl.c \ drivers/char/conmakehash \ drivers/char/drm/*-mod.c \ @@ -226,6 +231,7 @@ # files removed with 'make mrproper' MRPROPER_FILES = \ include/linux/autoconf.h include/linux/version.h \ + tmp* \ drivers/net/hamradio/soundmodem/sm_tbl_{afsk1200,afsk2666,fsk9600}.h \ drivers/net/hamradio/soundmodem/sm_tbl_{hapn4800,psk4800}.h \ drivers/net/hamradio/soundmodem/sm_tbl_{afsk2400_7,afsk2400_8}.h \ @@ -243,6 +249,7 @@ include/asm \ .hdepend scripts/mkdep scripts/split-include scripts/docproc \ $(TOPDIR)/include/linux/modversions.h \ + scripts/mkconfigs kernel/configs.c kernel/configs.o \ kernel.spec # directories removed with 'make mrproper' @@ -316,7 +323,7 @@ linuxsubdirs: $(patsubst %, _dir_%, $(SUBDIRS)) -$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h include/config/MARKER +$(patsubst %, _dir_%, $(SUBDIRS)) : dummy include/linux/version.h tmp_include_depends $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C $(patsubst _dir_%, %, $@) $(TOPDIR)/include/linux/version.h: include/linux/version.h @@ -356,13 +363,13 @@ comma := , -init/version.o: init/version.c include/linux/compile.h include/config/MARKER +init/version.o: init/version.c include/linux/compile.h tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DUTS_MACHINE='"$(ARCH)"' -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o init/version.o init/version.c -init/main.o: init/main.c include/config/MARKER +init/main.o: init/main.c tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst 
$(comma),_,$(subst -,_,$(*F))) -c -o $@ $< -init/do_mounts.o: init/do_mounts.c include/config/MARKER +init/do_mounts.o: init/do_mounts.c tmp_include_depends $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $< fs lib mm ipc kernel drivers net: dummy @@ -389,7 +396,7 @@ modules: $(patsubst %, _mod_%, $(SUBDIRS)) .PHONY: $(patsubst %, _mod_%, $(SUBDIRS)) -$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h include/config/MARKER +$(patsubst %, _mod_%, $(SUBDIRS)) : include/linux/version.h tmp_include_depends $(MAKE) -C $(patsubst _mod_%, %, $@) CFLAGS="$(CFLAGS) $(MODFLAGS)" MAKING_MODULES=1 modules .PHONY: modules_install @@ -494,6 +501,13 @@ ifdef CONFIG_MODVERSIONS $(MAKE) update-modverfile endif + (find $(TOPDIR) \( -name .depend -o -name .hdepend \) -print | xargs $(AWK) -f scripts/include_deps) > tmp_include_depends + sed -ne 's/^\([^ ].*\):.*/ \1 \\/p' tmp_include_depends > tmp_include_depends_1 + (echo ""; echo "all: \\"; cat tmp_include_depends_1; echo "") >> tmp_include_depends + rm tmp_include_depends_1 + +tmp_include_depends: include/config/MARKER dummy + $(MAKE) -r -f tmp_include_depends all ifdef CONFIG_MODVERSIONS MODVERFILE := $(TOPDIR)/include/linux/modversions.h @@ -569,5 +583,5 @@ rm $(KERNELPATH) ; \ cd $(TOPDIR) ; \ . scripts/mkversion > .version ; \ - rpm -ta $(TOPDIR)/../$(KERNELPATH).tar.gz ; \ + $(RPM) -ta $(TOPDIR)/../$(KERNELPATH).tar.gz ; \ rm $(TOPDIR)/../$(KERNELPATH).tar.gz diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/filemap.c linux.21pre4-ac2/mm/filemap.c --- linux.21pre4/mm/filemap.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/mm/filemap.c 2003-01-30 16:46:44.000000000 +0000 @@ -1602,7 +1602,7 @@ if (retval) break; - retval = mapping->a_ops->direct_IO(rw, inode, iobuf, (offset+progress) >> blocksize_bits, blocksize); + retval = mapping->a_ops->direct_IO(rw, filp, iobuf, (offset+progress) >> blocksize_bits, blocksize); if (rw == READ && retval > 0) mark_dirty_kiobuf(iobuf, retval); @@ -1743,7 +1743,7 @@ return written; } -asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count) +static ssize_t common_sendfile(int out_fd, int in_fd, loff_t *offset, size_t count) { ssize_t retval; struct file * in_file, * out_file; @@ -1788,27 +1788,19 @@ retval = 0; if (count) { read_descriptor_t desc; - loff_t pos = 0, *ppos; - - retval = -EFAULT; - ppos = &in_file->f_pos; - if (offset) { - if (get_user(pos, offset)) - goto fput_out; - ppos = &pos; - } + + if (!offset) + offset = &in_file->f_pos; desc.written = 0; desc.count = count; desc.buf = (char *) out_file; desc.error = 0; - do_generic_file_read(in_file, ppos, &desc, file_send_actor); + do_generic_file_read(in_file, offset, &desc, file_send_actor); retval = desc.written; if (!retval) retval = desc.error; - if (offset) - put_user(pos, offset); } fput_out: @@ -1819,6 +1811,42 @@ return retval; } +asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count) +{ + loff_t pos, *ppos = NULL; + ssize_t ret; + + if (offset) { + off_t off; + if (unlikely(get_user(off, offset))) + return -EFAULT; + pos = off; + ppos = &pos; + } + + ret = common_sendfile(out_fd, in_fd, ppos, count); + if (offset) + put_user((off_t)pos, offset); + return ret; +} + +asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t count) +{ + loff_t pos, *ppos = NULL; + ssize_t ret; + + if (offset) { + if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t)))) + return 
-EFAULT; + ppos = &pos; + } + + ret = common_sendfile(out_fd, in_fd, ppos, count); + if (offset) + put_user(pos, offset); + return ret; +} + static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr) { struct address_space *mapping = file->f_dentry->d_inode->i_mapping; @@ -2977,13 +3005,16 @@ * Check whether we've reached the file size limit. */ err = -EFBIG; - + if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) { if (pos >= limit) { send_sig(SIGXFSZ, current, 0); goto out; } - if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) { + /* Fix this up when we got to rlimit64 */ + if (pos > 0xFFFFFFFFULL) + count = 0; + else if(count > limit - (u32)pos) { /* send_sig(SIGXFSZ, current, 0); */ count = limit - (u32)pos; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/highmem.c linux.21pre4-ac2/mm/highmem.c --- linux.21pre4/mm/highmem.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/highmem.c 2003-01-06 19:29:40.000000000 +0000 @@ -355,6 +355,7 @@ /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); goto repeat_alloc; } @@ -391,6 +392,7 @@ /* we need to wait I/O completion */ run_task_queue(&tq_disk); + __set_current_state(TASK_RUNNING); yield(); goto repeat_alloc; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/Makefile linux.21pre4-ac2/mm/Makefile --- linux.21pre4/mm/Makefile 2003-01-29 16:26:32.000000000 +0000 +++ linux.21pre4-ac2/mm/Makefile 2003-01-06 19:29:55.000000000 +0000 @@ -9,12 +9,12 @@ O_TARGET := mm.o -export-objs := shmem.o filemap.o memory.o page_alloc.o +export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \ - shmem.o + shmem.o mempool.o obj-$(CONFIG_HIGHMEM) += highmem.o diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/mempool.c linux.21pre4-ac2/mm/mempool.c --- linux.21pre4/mm/mempool.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/mm/mempool.c 2003-01-06 17:37:31.000000000 +0000 @@ -0,0 +1,299 @@ +/* + * linux/mm/mempool.c + * + * memory buffer pool support. Such pools are mostly used + * for guaranteed, deadlock-free memory allocations during + * extreme VM load. + * + * started by Ingo Molnar, Copyright (C) 2001 + */ + +#include +#include +#include +#include + +struct mempool_s { + spinlock_t lock; + int min_nr; /* nr of elements at *elements */ + int curr_nr; /* Current nr of elements at *elements */ + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +}; + +static void add_element(mempool_t *pool, void *element) +{ + BUG_ON(pool->curr_nr >= pool->min_nr); + pool->elements[pool->curr_nr++] = element; +} + +static void *remove_element(mempool_t *pool) +{ + BUG_ON(pool->curr_nr <= 0); + return pool->elements[--pool->curr_nr]; +} + +static void free_pool(mempool_t *pool) +{ + while (pool->curr_nr) { + void *element = remove_element(pool); + pool->free(element, pool->pool_data); + } + kfree(pool->elements); + kfree(pool); +} + +/** + * mempool_create - create a memory pool + * @min_nr: the minimum number of elements guaranteed to be + * allocated for this pool. + * @alloc_fn: user-defined element-allocation function. + * @free_fn: user-defined element-freeing function. 
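Editorial aside: the sys_sendfile64() entry point added in the mm/filemap.c hunk above takes a full 64-bit offset where the older sendfile() is limited to a 32-bit off_t. A hedged userspace sketch, assuming the glibc sendfile64() wrapper is available; the descriptor names are invented:

#define _LARGEFILE64_SOURCE
#include <sys/sendfile.h>

/* Hypothetical: stream "count" bytes from a large file to a socket,
 * starting at an offset that may lie beyond 4GB. */
static ssize_t send_big_chunk(int sock_fd, int file_fd,
			      off64_t *offset, size_t count)
{
	/* The kernel advances *offset past the bytes actually sent and
	 * leaves the file position of file_fd untouched. */
	return sendfile64(sock_fd, file_fd, offset, count);
}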
+ * @pool_data: optional private data available to the user-defined functions. + * + * this function creates and allocates a guaranteed size, preallocated + * memory pool. The pool can be used from the mempool_alloc and mempool_free + * functions. This function might sleep. Both the alloc_fn() and the free_fn() + * functions might sleep - as long as the mempool_alloc function is not called + * from IRQ contexts. + */ +mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data) +{ + mempool_t *pool; + + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + memset(pool, 0, sizeof(*pool)); + pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL); + if (!pool->elements) { + kfree(pool); + return NULL; + } + spin_lock_init(&pool->lock); + pool->min_nr = min_nr; + pool->pool_data = pool_data; + init_waitqueue_head(&pool->wait); + pool->alloc = alloc_fn; + pool->free = free_fn; + + /* + * First pre-allocate the guaranteed number of buffers. + */ + while (pool->curr_nr < pool->min_nr) { + void *element; + + element = pool->alloc(GFP_KERNEL, pool->pool_data); + if (unlikely(!element)) { + free_pool(pool); + return NULL; + } + add_element(pool, element); + } + return pool; +} + +/** + * mempool_resize - resize an existing memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @new_min_nr: the new minimum number of elements guaranteed to be + * allocated for this pool. + * @gfp_mask: the usual allocation bitmask. + * + * This function shrinks/grows the pool. In the case of growing, + * it cannot be guaranteed that the pool will be grown to the new + * size immediately, but new mempool_free() calls will refill it. + * + * Note, the caller must guarantee that no mempool_destroy is called + * while this function is running. mempool_alloc() & mempool_free() + * might be called (eg. from IRQ contexts) while this function executes. + */ +int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask) +{ + void *element; + void **new_elements; + unsigned long flags; + + BUG_ON(new_min_nr <= 0); + + spin_lock_irqsave(&pool->lock, flags); + if (new_min_nr < pool->min_nr) { + while (pool->curr_nr > new_min_nr) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + pool->free(element, pool->pool_data); + spin_lock_irqsave(&pool->lock, flags); + } + pool->min_nr = new_min_nr; + goto out_unlock; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* Grow the pool */ + new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); + if (!new_elements) + return -ENOMEM; + + spin_lock_irqsave(&pool->lock, flags); + memcpy(new_elements, pool->elements, + pool->curr_nr * sizeof(*new_elements)); + kfree(pool->elements); + pool->elements = new_elements; + pool->min_nr = new_min_nr; + + while (pool->curr_nr < pool->min_nr) { + spin_unlock_irqrestore(&pool->lock, flags); + element = pool->alloc(gfp_mask, pool->pool_data); + if (!element) + goto out; + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) + add_element(pool, element); + else + kfree(element); /* Raced */ + } +out_unlock: + spin_unlock_irqrestore(&pool->lock, flags); +out: + return 0; +} + +/** + * mempool_destroy - deallocate a memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * this function only sleeps if the free_fn() function sleeps. 
The caller + * has to guarantee that all elements have been returned to the pool (ie: + * freed) prior to calling mempool_destroy(). + */ +void mempool_destroy(mempool_t *pool) +{ + if (pool->curr_nr != pool->min_nr) + BUG(); /* There were outstanding elements */ + free_pool(pool); +} + +/** + * mempool_alloc - allocate an element from a specific memory pool + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * @gfp_mask: the usual allocation bitmask. + * + * this function only sleeps if the alloc_fn function sleeps or + * returns NULL. Note that due to preallocation, this function + * *never* fails when called from process contexts. (it might + * fail if called from an IRQ context.) + */ +void * mempool_alloc(mempool_t *pool, int gfp_mask) +{ + void *element; + unsigned long flags; + int curr_nr; + DECLARE_WAITQUEUE(wait, current); + int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + +repeat_alloc: + element = pool->alloc(gfp_nowait, pool->pool_data); + if (likely(element != NULL)) + return element; + + /* + * If the pool is less than 50% full then try harder + * to allocate an element: + */ + if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) { + element = pool->alloc(gfp_mask, pool->pool_data); + if (likely(element != NULL)) + return element; + } + + /* + * Kick the VM at this point. + */ + wakeup_bdflush(); + + spin_lock_irqsave(&pool->lock, flags); + if (likely(pool->curr_nr)) { + element = remove_element(pool); + spin_unlock_irqrestore(&pool->lock, flags); + return element; + } + spin_unlock_irqrestore(&pool->lock, flags); + + /* We must not sleep in the GFP_ATOMIC case */ + if (gfp_mask == gfp_nowait) + return NULL; + + run_task_queue(&tq_disk); + + add_wait_queue_exclusive(&pool->wait, &wait); + set_task_state(current, TASK_UNINTERRUPTIBLE); + + spin_lock_irqsave(&pool->lock, flags); + curr_nr = pool->curr_nr; + spin_unlock_irqrestore(&pool->lock, flags); + + if (!curr_nr) + schedule(); + + current->state = TASK_RUNNING; + remove_wait_queue(&pool->wait, &wait); + + goto repeat_alloc; +} + +/** + * mempool_free - return an element to the pool. + * @element: pool element pointer. + * @pool: pointer to the memory pool which was allocated via + * mempool_create(). + * + * this function only sleeps if the free_fn() function sleeps. + */ +void mempool_free(void *element, mempool_t *pool) +{ + unsigned long flags; + + if (pool->curr_nr < pool->min_nr) { + spin_lock_irqsave(&pool->lock, flags); + if (pool->curr_nr < pool->min_nr) { + add_element(pool, element); + spin_unlock_irqrestore(&pool->lock, flags); + wake_up(&pool->wait); + return; + } + spin_unlock_irqrestore(&pool->lock, flags); + } + pool->free(element, pool->pool_data); +} + +/* + * A commonly used alloc and free fn. 
+ */ +void *mempool_alloc_slab(int gfp_mask, void *pool_data) +{ + kmem_cache_t *mem = (kmem_cache_t *) pool_data; + return kmem_cache_alloc(mem, gfp_mask); +} + +void mempool_free_slab(void *element, void *pool_data) +{ + kmem_cache_t *mem = (kmem_cache_t *) pool_data; + kmem_cache_free(mem, element); +} + + +EXPORT_SYMBOL(mempool_create); +EXPORT_SYMBOL(mempool_resize); +EXPORT_SYMBOL(mempool_destroy); +EXPORT_SYMBOL(mempool_alloc); +EXPORT_SYMBOL(mempool_free); +EXPORT_SYMBOL(mempool_alloc_slab); +EXPORT_SYMBOL(mempool_free_slab); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/mlock.c linux.21pre4-ac2/mm/mlock.c --- linux.21pre4/mm/mlock.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/mlock.c 2003-01-06 17:37:40.000000000 +0000 @@ -198,6 +198,7 @@ unsigned long lock_limit; int error = -ENOMEM; + vm_validate_enough("entering sys_mlock"); down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; @@ -220,6 +221,7 @@ error = do_mlock(start, len, 1); out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_mlock"); return error; } @@ -227,11 +229,13 @@ { int ret; + vm_validate_enough("entering sys_munlock"); down_write(¤t->mm->mmap_sem); len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); start &= PAGE_MASK; ret = do_mlock(start, len, 0); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_munlock"); return ret; } @@ -268,6 +272,8 @@ unsigned long lock_limit; int ret = -EINVAL; + vm_validate_enough("entering sys_mlockall"); + down_write(¤t->mm->mmap_sem); if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE))) goto out; @@ -287,15 +293,18 @@ ret = do_mlockall(flags); out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_mlockall"); return ret; } asmlinkage long sys_munlockall(void) { int ret; + vm_validate_enough("entering sys_munlockall"); down_write(¤t->mm->mmap_sem); ret = do_mlockall(0); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting sys_munlockall"); return ret; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/mmap.c linux.21pre4-ac2/mm/mmap.c --- linux.21pre4/mm/mmap.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/mmap.c 2003-01-30 16:45:05.000000000 +0000 @@ -1,8 +1,25 @@ /* * linux/mm/mmap.c - * * Written by obz. + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + #include #include #include @@ -46,6 +63,7 @@ int sysctl_overcommit_memory; int max_map_count = DEFAULT_MAX_MAP_COUNT; +atomic_t vm_committed_space = ATOMIC_INIT(0); /* Check that a process has enough memory to allocate a * new virtual mapping. @@ -55,42 +73,109 @@ /* Stupid algorithm to decide if we have enough memory: while * simple, it hopefully works in most obvious cases.. 
Easy to * fool it, but this should catch most mistakes. - */ - /* 23/11/98 NJC: Somewhat less stupid version of algorithm, + * + * 23/11/98 NJC: Somewhat less stupid version of algorithm, * which tries to do "TheRightThing". Instead of using half of * (buffers+cache), use the minimum values. Allow an extra 2% * of num_physpages for safety margin. + * + * 2002/02/26 Alan Cox: Added two new modes that do real accounting */ + unsigned long free, allowed; + struct sysinfo i; - unsigned long free; + atomic_add(pages, &vm_committed_space); /* Sometimes we want to use more memory than we have. */ - if (sysctl_overcommit_memory) - return 1; + if (sysctl_overcommit_memory == 1) + return 1; + + if (sysctl_overcommit_memory == 0) + { + /* The page cache contains buffer pages these days.. */ + free = atomic_read(&page_cache_size); + free += nr_free_pages(); + free += nr_swap_pages; + + /* + * This double-counts: the nrpages are both in the page-cache + * and in the swapper space. At the same time, this compensates + * for the swap-space over-allocation (ie "nr_swap_pages" being + * too small. + */ + free += swapper_space.nrpages; + + /* + * The code below doesn't account for free space in the inode + * and dentry slab cache, slab cache fragmentation, inodes and + * dentries which will become freeable under VM load, etc. + * Lets just hope all these (complex) factors balance out... + */ + free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT; + free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT; + + if(free > pages) + return 1; + atomic_sub(pages, &vm_committed_space); + return 0; + } + allowed = total_swap_pages; + + if(sysctl_overcommit_memory == 2) + { + /* FIXME - need to add arch hooks to get the bits we need + without the higher overhead crap */ + si_meminfo(&i); + allowed += i.totalram >> 1; + } + if(atomic_read(&vm_committed_space) < allowed) + return 1; + atomic_sub(pages, &vm_committed_space); + return 0; + +} - /* The page cache contains buffer pages these days.. */ - free = atomic_read(&page_cache_size); - free += nr_free_pages(); - free += nr_swap_pages; +void vm_unacct_memory(long pages) +{ + atomic_sub(pages, &vm_committed_space); +} - /* - * This double-counts: the nrpages are both in the page-cache - * and in the swapper space. At the same time, this compensates - * for the swap-space over-allocation (ie "nr_swap_pages" being - * too small. - */ - free += swapper_space.nrpages; +/* + * Don't even bother telling me the locking is wrong - its a test + * routine and uniprocessor is quite sufficient.. + * + * To enable this debugging you must tweak the #if below, and build + * with no SYS5 shared memory (thats not validated yet) and non SMP + */ - /* - * The code below doesn't account for free space in the inode - * and dentry slab cache, slab cache fragmentation, inodes and - * dentries which will become freeable under VM load, etc. - * Lets just hope all these (complex) factors balance out... 
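Editorial aside: the rewritten vm_enough_memory() above distinguishes three policies selected by sysctl_overcommit_memory — 1 always succeeds, 0 keeps the old heuristic, and 2 accounts strictly against swap plus half of RAM (other values account against swap alone). An illustrative restatement of the strict branch, with simplified names that are not part of the patch:

/* Sketch only: mirrors the overcommit_memory == 2 path above.
 * "committed" stands for vm_committed_space after the new charge
 * has already been added. */
static int strict_commit_ok(unsigned long swap_pages,
			    unsigned long ram_pages,
			    long committed)
{
	unsigned long allowed = swap_pages + ram_pages / 2;

	return committed < allowed;
}

The policy is selected at run time through the existing /proc/sys/vm/overcommit_memory sysctl.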
- */ - free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT; - free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT; +void vm_validate_enough(char *x) +{ +#if 0 + unsigned long count = 0UL; + struct mm_struct *mm; + struct vm_area_struct *vma; + struct list_head *mmp; + unsigned long flags; - return free > pages; + spin_lock_irqsave(&mmlist_lock, flags); + + list_for_each(mmp, &init_mm.mmlist) + { + mm = list_entry(mmp, struct mm_struct, mmlist); + for(vma = mm->mmap; vma!=NULL; vma=vma->vm_next) + { + if(vma->vm_flags & VM_ACCOUNT) + count += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + } + } + if(count != atomic_read(&vm_committed_space)) + { + printk("MM crappo accounting %s: %lu %ld.\n", + x, count, atomic_read(&vm_committed_space)); + atomic_set(&vm_committed_space, count); + } + spin_unlock_irqrestore(&mmlist_lock, flags); +#endif } /* Remove one vm structure from the inode's i_mapping address space. */ @@ -161,12 +246,13 @@ /* Always allow shrinking brk. */ if (brk <= mm->brk) { - if (!do_munmap(mm, newbrk, oldbrk-newbrk)) + if (!do_munmap(mm, newbrk, oldbrk-newbrk, 1)) goto set_brk; goto out; } /* Check against rlimit.. */ + /* FIXME: - this seems to be checked in do_brk.. */ rlim = current->rlim[RLIMIT_DATA].rlim_cur; if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) goto out; @@ -175,10 +261,6 @@ if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) goto out; - /* Check if we have enough memory.. */ - if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) - goto out; - /* Ok, looks good - let it rip. */ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) goto out; @@ -399,14 +481,18 @@ int correct_wcount = 0; int error; rb_node_t ** rb_link, * rb_parent; + unsigned long charged = 0; + vm_validate_enough("entering do_mmap_pgoff"); if (file && (!file->f_op || !file->f_op->mmap)) return -ENODEV; - if ((len = PAGE_ALIGN(len)) == 0) + if (!len) return addr; - if (len > TASK_SIZE) + len = PAGE_ALIGN(len); + + if (len > TASK_SIZE || len == 0) return -EINVAL; /* offset overflow? */ @@ -482,7 +568,7 @@ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) + if (do_munmap(mm, addr, len, 1)) return -ENOMEM; goto munmap_back; } @@ -492,11 +578,15 @@ > current->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; - /* Private writable mapping? Check memory availability.. */ - if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && - !(flags & MAP_NORESERVE) && - !vm_enough_memory(len >> PAGE_SHIFT)) - return -ENOMEM; + if (!(flags & MAP_NORESERVE) || sysctl_overcommit_memory > 1) { + if ((vm_flags & (VM_SHARED|VM_WRITE)) == VM_WRITE) { + /* Private writable mapping: check memory availability */ + charged = len >> PAGE_SHIFT; + if (!vm_enough_memory(charged)) + return -ENOMEM; + vm_flags |= VM_ACCOUNT; + } + } /* Can we just expand an old anonymous mapping? */ if (!file && !(vm_flags & VM_SHARED) && rb_parent) @@ -508,8 +598,9 @@ * not unmapped, but the maps are removed from the list. */ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + error = -ENOMEM; if (!vma) - return -ENOMEM; + goto unacct_error; vma->vm_mm = mm; vma->vm_start = addr; @@ -561,8 +652,7 @@ * to update the tree pointers. */ addr = vma->vm_start; - stale_vma = find_vma_prepare(mm, addr, &prev, - &rb_link, &rb_parent); + stale_vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); /* * Make sure the lowlevel driver did its job right. 
*/ @@ -583,6 +673,7 @@ mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); } + vm_validate_enough("out from do_mmap_pgoff"); return addr; unmap_and_free_vma: @@ -595,6 +686,10 @@ zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start); free_vma: kmem_cache_free(vm_area_cachep, vma); +unacct_error: + if(charged) + vm_unacct_memory(charged); + vm_validate_enough("error path from do_mmap_pgoff"); return error; } @@ -737,6 +832,96 @@ return NULL; } +/* vma is the first one with address < vma->vm_end, + * and even address < vma->vm_start. Have to extend vma. */ + +#ifdef ARCH_STACK_GROWSUP +static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) +{ + unsigned long grow; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + + vm_validate_enough("entering expand_stack"); + + /* + * vma->vm_start/vm_end cannot change under us because the caller is required + * to hold the mmap_sem in write mode. We need to get the spinlock only + * before relocating the vma range ourself. + */ + spin_lock(&vma->vm_mm->page_table_lock); + + address += 4 + PAGE_SIZE - 1; + address &= PAGE_MASK; + grow = (address - vma->vm_end) >> PAGE_SHIFT; + + /* Overcommit.. */ + if(!vm_enough_memory(grow)) { + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + + if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur || + ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) + { + spin_unlock(&vma->vm_mm->page_table_lock); + vm_unacct_memory(grow); + vm_validate_enough("exiting expand_stack - FAIL"); + return -ENOMEM; + } + vma->vm_end = address; + vma->vm_mm->total_vm += grow; + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm += grow; + vm_validate_enough("exiting expand_stack"); + return 0; +} +#else + +int expand_stack(struct vm_area_struct * vma, unsigned long address) +{ + unsigned long grow; + + if (!(vma->vm_flags & VM_GROWSDOWN)) + return -EFAULT; + + vm_validate_enough("entering expand_stack"); + + /* + * vma->vm_start/vm_end cannot change under us because the caller is required + * to hold the mmap_sem in write mode. We need to get the spinlock only + * before relocating the vma range ourself. + */ + address &= PAGE_MASK; + spin_lock(&vma->vm_mm->page_table_lock); + grow = (vma->vm_start - address) >> PAGE_SHIFT; + + /* Overcommit.. 
*/ + if(!vm_enough_memory(grow)) { + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + + if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || + ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { + spin_unlock(&vma->vm_mm->page_table_lock); + vm_unacct_memory(grow); + vm_validate_enough("exiting expand_stack - FAIL"); + return -ENOMEM; + } + vma->vm_start = address; + vma->vm_pgoff -= grow; + vma->vm_mm->total_vm += grow; + if (vma->vm_flags & VM_LOCKED) + vma->vm_mm->locked_vm += grow; + spin_unlock(&vma->vm_mm->page_table_lock); + vm_validate_enough("exiting expand_stack"); + return 0; +} + +#endif + struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr) { struct vm_area_struct * vma; @@ -784,7 +969,7 @@ */ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm, struct vm_area_struct *area, unsigned long addr, size_t len, - struct vm_area_struct *extra) + struct vm_area_struct *extra, int acct) { struct vm_area_struct *mpnt; unsigned long end = addr + len; @@ -792,6 +977,8 @@ area->vm_mm->total_vm -= len >> PAGE_SHIFT; if (area->vm_flags & VM_LOCKED) area->vm_mm->locked_vm -= len >> PAGE_SHIFT; + if (acct && (area->vm_flags & VM_ACCOUNT)) + vm_unacct_memory(len >> PAGE_SHIFT); /* Unmapping the whole area. */ if (addr == area->vm_start && end == area->vm_end) { @@ -917,10 +1104,12 @@ * work. This now handles partial unmappings. * Jeremy Fitzhardine */ -int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) +int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) { struct vm_area_struct *mpnt, *prev, **npp, *free, *extra; + if(acct) vm_validate_enough("entering do_munmap"); + if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr) return -EINVAL; @@ -987,6 +1176,7 @@ (file = mpnt->vm_file) != NULL) { atomic_dec(&file->f_dentry->d_inode->i_writecount); } + remove_shared_vm_struct(mpnt); mm->map_count--; @@ -995,7 +1185,7 @@ /* * Fix the mapping, and free the old area if it wasn't reused. */ - extra = unmap_fixup(mm, mpnt, st, size, extra); + extra = unmap_fixup(mm, mpnt, st, size, extra, acct); if (file) atomic_inc(&file->f_dentry->d_inode->i_writecount); } @@ -1006,6 +1196,7 @@ kmem_cache_free(vm_area_cachep, extra); free_pgtables(mm, prev, addr, addr+len); + if(acct) vm_validate_enough("exit -ok- do_munmap"); return 0; } @@ -1016,7 +1207,7 @@ struct mm_struct *mm = current->mm; down_write(&mm->mmap_sem); - ret = do_munmap(mm, addr, len); + ret = do_munmap(mm, addr, len, 1); up_write(&mm->mmap_sem); return ret; } @@ -1033,6 +1224,9 @@ unsigned long flags; rb_node_t ** rb_link, * rb_parent; + vm_validate_enough("entering do_brk"); + + len = PAGE_ALIGN(len); if (!len) return addr; @@ -1053,7 +1247,7 @@ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); if (vma && vma->vm_start < addr + len) { - if (do_munmap(mm, addr, len)) + if (do_munmap(mm, addr, len, 1)) return -ENOMEM; goto munmap_back; } @@ -1069,7 +1263,7 @@ if (!vm_enough_memory(len >> PAGE_SHIFT)) return -ENOMEM; - flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags; + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; /* Can we just expand an old anonymous mapping? 
*/ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags)) @@ -1080,8 +1274,11 @@ */ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); if (!vma) + { + /* We accounted this address space - undo it */ + vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; - + } vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; @@ -1100,6 +1297,9 @@ mm->locked_vm += len >> PAGE_SHIFT; make_pages_present(addr, addr + len); } + + vm_validate_enough("exiting do_brk"); + return addr; } @@ -1141,6 +1341,10 @@ unsigned long end = mpnt->vm_end; unsigned long size = end - start; + /* If the VMA has been charged for, account for its removal */ + if (mpnt->vm_flags & VM_ACCOUNT) + vm_unacct_memory(size >> PAGE_SHIFT); + if (mpnt->vm_ops) { if (mpnt->vm_ops->close) mpnt->vm_ops->close(mpnt); @@ -1159,8 +1363,9 @@ BUG(); clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD); - flush_tlb_mm(mm); + vm_validate_enough("exiting exit_mmap"); + } /* Insert vm structure into process list sorted by address diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/mprotect.c linux.21pre4-ac2/mm/mprotect.c --- linux.21pre4/mm/mprotect.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/mprotect.c 2003-01-06 15:38:09.000000000 +0000 @@ -2,6 +2,23 @@ * linux/mm/mprotect.c * * (C) Copyright 1994 Linus Torvalds + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include @@ -241,11 +258,28 @@ { pgprot_t newprot; int error; + unsigned long charged = 0; if (newflags == vma->vm_flags) { *pprev = vma; return 0; } + + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we + * make it unwritable again. + * + * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting + * a MAP_NORESERVE private mapping to writable will now reserve. 
+ */ + if ((newflags & VM_WRITE) && + !(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) { + charged = (end - start) >> PAGE_SHIFT; + if (!vm_enough_memory(charged)) + return -ENOMEM; + newflags |= VM_ACCOUNT; + } newprot = protection_map[newflags & 0xf]; if (start == vma->vm_start) { if (end == vma->vm_end) @@ -256,10 +290,10 @@ error = mprotect_fixup_end(vma, pprev, start, newflags, newprot); else error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot); - - if (error) + if (error) { + vm_unacct_memory(charged); return error; - + } change_protection(start, end, newprot); return 0; } @@ -270,6 +304,8 @@ struct vm_area_struct * vma, * next, * prev; int error = -EINVAL; + vm_validate_enough("entering mprotect"); + if (start & ~PAGE_MASK) return -EINVAL; len = PAGE_ALIGN(len); @@ -333,5 +369,6 @@ } out: up_write(¤t->mm->mmap_sem); + vm_validate_enough("exiting mprotect"); return error; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/mremap.c linux.21pre4-ac2/mm/mremap.c --- linux.21pre4/mm/mremap.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/mremap.c 2003-01-06 15:38:09.000000000 +0000 @@ -2,6 +2,23 @@ * linux/mm/remap.c * * (C) Copyright 1996 Linus Torvalds + * + * Address space accounting code + * (c) Copyright 2002 Red Hat Inc, All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include @@ -13,8 +30,6 @@ #include #include -extern int vm_enough_memory(long pages); - static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr) { pgd_t * pgd; @@ -130,6 +145,7 @@ struct vm_area_struct * new_vma, * next, * prev; int allocated_vma; + new_vma = NULL; next = find_vma_prev(mm, new_addr, &prev); if (next) { @@ -189,7 +205,8 @@ new_vma->vm_ops->open(new_vma); insert_vm_struct(current->mm, new_vma); } - do_munmap(current->mm, addr, old_len); + /* The old VMA has been accounted for, don't double account */ + do_munmap(current->mm, addr, old_len, 0); current->mm->total_vm += new_len >> PAGE_SHIFT; if (new_vma->vm_flags & VM_LOCKED) { current->mm->locked_vm += new_len >> PAGE_SHIFT; @@ -217,6 +234,7 @@ { struct vm_area_struct *vma; unsigned long ret = -EINVAL; + unsigned long charged = 0; if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; @@ -246,16 +264,17 @@ if ((addr <= new_addr) && (addr+old_len) > new_addr) goto out; - do_munmap(current->mm, new_addr, new_len); + do_munmap(current->mm, new_addr, new_len, 1); } /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. 
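Editorial aside: the practical effect of the mprotect_fixup() change above is that a private mapping is charged against the commit limit the moment it becomes writable rather than when it is created. A hypothetical userspace illustration (not part of the patch):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 1024 * 1024;
	/* Read-only private anonymous memory: not charged at mmap() time. */
	void *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* This is where the 16MB is now reserved; under strict
	 * overcommit it can fail with ENOMEM. */
	if (mprotect(p, len, PROT_READ | PROT_WRITE) < 0)
		perror("mprotect");

	munmap(p, len);
	return 0;
}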
+ * do_munmap does all the needed commit accounting */ ret = addr; if (old_len >= new_len) { - do_munmap(current->mm, addr+new_len, old_len - new_len); + do_munmap(current->mm, addr+new_len, old_len - new_len, 1); if (!(flags & MREMAP_FIXED) || (new_addr == addr)) goto out; } @@ -285,11 +304,12 @@ if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) > current->rlim[RLIMIT_AS].rlim_cur) goto out; - /* Private writable mapping? Check memory availability.. */ - if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && - !(flags & MAP_NORESERVE) && - !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT)) - goto out; + + if (vma->vm_flags & VM_ACCOUNT) { + charged = (new_len - old_len) >> PAGE_SHIFT; + if(!vm_enough_memory(charged)) + goto out_nc; + } /* old_len exactly to the end of the area.. * And we're not relocating the area. @@ -313,6 +333,7 @@ addr + new_len); } ret = addr; + vm_validate_enough("mremap path1"); goto out; } } @@ -336,6 +357,12 @@ ret = move_vma(vma, addr, old_len, new_len, new_addr); } out: + if(ret & ~PAGE_MASK) + { + vm_unacct_memory(charged); + vm_validate_enough("mremap error path"); + } +out_nc: return ret; } @@ -345,8 +372,10 @@ { unsigned long ret; + vm_validate_enough("entry to mremap"); down_write(¤t->mm->mmap_sem); ret = do_mremap(addr, old_len, new_len, flags, new_addr); up_write(¤t->mm->mmap_sem); + vm_validate_enough("exit from mremap"); return ret; } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/oom_kill.c linux.21pre4-ac2/mm/oom_kill.c --- linux.21pre4/mm/oom_kill.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/oom_kill.c 2003-01-06 19:30:57.000000000 +0000 @@ -82,7 +82,7 @@ * Niced processes are most likely less important, so double * their badness points. */ - if (p->nice > 0) + if (task_nice(p) > 0) points *= 2; /* @@ -146,7 +146,7 @@ * all the memory it needs. That way it should be able to * exit() and clear out its resources quickly... */ - p->counter = 5 * HZ; + p->time_slice = HZ; p->flags |= PF_MEMALLOC | PF_MEMDIE; /* This process has hardware access, be more careful. */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/shmem.c linux.21pre4-ac2/mm/shmem.c --- linux.21pre4/mm/shmem.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/mm/shmem.c 2003-01-07 00:12:54.000000000 +0000 @@ -5,8 +5,25 @@ * 2000 Transmeta Corp. * 2000-2001 Christoph Rohland * 2000-2001 SAP AG + * 2002 Red Hat Inc. + * TODO: + * Accounting needs verification + * We need to verify all the security fixes from generic_file_write are + * now there * - * This file is released under the GPL. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -21,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -54,43 +72,25 @@ LIST_HEAD (shmem_inodes); static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED; -atomic_t shmem_nrpages = ATOMIC_INIT(0); /* Not used right now */ +atomic_t shmem_nrpages = ATOMIC_INIT(0); static struct page *shmem_getpage_locked(struct shmem_inode_info *, struct inode *, unsigned long); -/* - * shmem_recalc_inode - recalculate the size of an inode - * - * @inode: inode to recalc - * @swap: additional swap pages freed externally - * - * We have to calculate the free blocks since the mm can drop pages - * behind our back - * - * But we know that normally - * inodes->i_blocks/BLOCKS_PER_PAGE == - * inode->i_mapping->nrpages + info->swapped - * - * So the mm freed - * inodes->i_blocks/BLOCKS_PER_PAGE - - * (inode->i_mapping->nrpages + info->swapped) - * - * It has to be called with the spinlock held. - */ - -static void shmem_recalc_inode(struct inode * inode) +static void shmem_removepage(struct page *page) { - unsigned long freed; + struct inode * inode; + struct shmem_sb_info * sbinfo; - freed = (inode->i_blocks/BLOCKS_PER_PAGE) - - (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped); - if (freed){ - struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb); - inode->i_blocks -= freed*BLOCKS_PER_PAGE; - spin_lock (&sbinfo->stat_lock); - sbinfo->free_blocks += freed; - spin_unlock (&sbinfo->stat_lock); - } + atomic_dec(&shmem_nrpages); + if (PageLaunder(page)) + return; + + inode = page->mapping->host; + sbinfo = SHMEM_SB(inode->i_sb); + inode->i_blocks -= BLOCKS_PER_PAGE; + spin_lock (&sbinfo->stat_lock); + sbinfo->free_blocks++; + spin_unlock (&sbinfo->stat_lock); } /* @@ -320,6 +320,7 @@ unsigned long partial; unsigned long freed = 0; struct shmem_inode_info * info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); down(&info->sem); inode->i_ctime = inode->i_mtime = CURRENT_TIME; @@ -352,11 +353,41 @@ freed += shmem_truncate_indirect(info, index); info->swapped -= freed; - shmem_recalc_inode(inode); + inode->i_blocks -= freed*BLOCKS_PER_PAGE; spin_unlock (&info->lock); + spin_lock (&sbinfo->stat_lock); + sbinfo->free_blocks += freed; + spin_unlock (&sbinfo->stat_lock); up(&info->sem); } +static int shmem_notify_change(struct dentry * dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + long change = 0; + int error; + + if ((attr->ia_valid & ATTR_SIZE) && (attr->ia_size <= SHMEM_MAX_BYTES)) { + /* + * Account swap file usage based on new file size, + * but just let vmtruncate fail on out-of-range sizes. 
+ */ + change = VM_ACCT(attr->ia_size) - VM_ACCT(inode->i_size); + if (change > 0) { + if (!vm_enough_memory(change)) + return -ENOMEM; + } else + vm_unacct_memory(-change); + + } + error = inode_change_ok(inode, attr); + if (!error) + error = inode_setattr(inode, attr); + if (error && change) + vm_unacct_memory(change); + return error; +} + static void shmem_delete_inode(struct inode * inode) { struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); @@ -365,6 +396,7 @@ spin_lock (&shmem_ilock); list_del (&SHMEM_I(inode)->list); spin_unlock (&shmem_ilock); + vm_unacct_memory(VM_ACCT(inode->i_size)); inode->i_size = 0; shmem_truncate (inode); } @@ -419,6 +451,7 @@ swap_free(entry); ptr[offset] = (swp_entry_t) {0}; delete_from_swap_cache(page); + atomic_inc(&shmem_nrpages); add_to_page_cache(page, info->inode->i_mapping, offset + idx); SetPageDirty(page); SetPageUptodate(page); @@ -484,7 +517,6 @@ entry = shmem_swp_entry(info, index, 0); if (IS_ERR(entry)) /* this had been allocated on page allocation */ BUG(); - shmem_recalc_inode(inode); if (entry->val) BUG(); @@ -498,6 +530,7 @@ * Raced with "speculative" read_swap_cache_async. * Add page back to page cache, unref swap, try again. */ + atomic_inc(&shmem_nrpages); add_to_page_cache_locked(page, mapping, index); spin_unlock(&info->lock); swap_free(swap); @@ -555,7 +588,6 @@ return page; } - shmem_recalc_inode(inode); if (entry->val) { unsigned long flags; @@ -622,6 +654,7 @@ /* We have the page */ SetPageUptodate(page); + atomic_inc(&shmem_nrpages); return page; no_space: spin_unlock (&sbinfo->stat_lock); @@ -781,6 +814,13 @@ static struct inode_operations shmem_symlink_inode_operations; static struct inode_operations shmem_symlink_inline_operations; +/* + * This is a copy of generic_file_write slightly modified. It would + * help no end if it were kept remotely up to date with the + * generic_file_write changes. I don't alas see a good way to merge + * it back and use the generic one -- Alan + */ + static ssize_t shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos) { @@ -792,6 +832,7 @@ unsigned long written; long status; int err; + loff_t maxpos; if ((ssize_t) count < 0) return -EINVAL; @@ -804,12 +845,12 @@ pos = *ppos; err = -EINVAL; if (pos < 0) - goto out; + goto out_nc; err = file->f_error; if (err) { file->f_error = 0; - goto out; + goto out_nc; } written = 0; @@ -817,6 +858,18 @@ if (file->f_flags & O_APPEND) pos = inode->i_size; + maxpos = inode->i_size; + + if (pos + count > inode->i_size) { + maxpos = pos + count; + if (maxpos > SHMEM_MAX_BYTES) + maxpos = SHMEM_MAX_BYTES; + if (!vm_enough_memory(VM_ACCT(maxpos) - VM_ACCT(inode->i_size))) { + err = -ENOMEM; + goto out_nc; + } + } + /* * Check whether we've reached the file size limit. */ @@ -826,21 +879,77 @@ send_sig(SIGXFSZ, current, 0); goto out; } - if (count > limit - pos) { + if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) { + /* send_sig(SIGXFSZ, current, 0); */ + count = limit - (u32)pos; + } + } + + /* + * LFS rule + */ + if ( pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) { + if (pos >= MAX_NON_LFS) { send_sig(SIGXFSZ, current, 0); - count = limit - pos; + goto out; + } + if (count > MAX_NON_LFS - (u32)pos) { + /* send_sig(SIGXFSZ, current, 0); */ + count = MAX_NON_LFS - (u32)pos; } } - status = 0; - if (count) { - remove_suid(inode); - inode->i_ctime = inode->i_mtime = CURRENT_TIME; + /* + * Are we about to exceed the fs block limit ? 
+ * + * If we have written data it becomes a short write + * If we have exceeded without writing data we send + * a signal and give them an EFBIG. + * + * Linus frestrict idea will clean these up nicely.. + */ + + if (!S_ISBLK(inode->i_mode)) { + if (pos >= inode->i_sb->s_maxbytes) + { + if (count || pos > inode->i_sb->s_maxbytes) { + send_sig(SIGXFSZ, current, 0); + err = -EFBIG; + goto out; + } + /* zero-length writes at ->s_maxbytes are OK */ + } + + if (pos + count > inode->i_sb->s_maxbytes) + count = inode->i_sb->s_maxbytes - pos; + } else { + if (is_read_only(inode->i_rdev)) { + err = -EPERM; + goto out; + } + if (pos >= inode->i_size) { + if (count || pos > inode->i_size) { + err = -ENOSPC; + goto out; + } + } + + if (pos + count > inode->i_size) + count = inode->i_size - pos; } + err = 0; + if (count == 0) + goto out; + status = 0; + + remove_suid(inode); + inode->i_ctime = inode->i_mtime = CURRENT_TIME; + while (count) { unsigned long bytes, index, offset; char *kaddr; + int deactivate = 1; /* * Try to find the page in the cache. If it isn't there, @@ -851,6 +960,7 @@ bytes = PAGE_CACHE_SIZE - offset; if (bytes > count) { bytes = count; + deactivate = 0; } /* @@ -879,7 +989,7 @@ } kaddr = kmap(page); - status = copy_from_user(kaddr+offset, buf, bytes); + status = __copy_from_user(kaddr+offset, buf, bytes); kunmap(page); if (status) goto fail_write; @@ -896,7 +1006,14 @@ } unlock: /* Mark it unlocked again and drop the page.. */ + SetPageReferenced(page); UnlockPage(page); +#if USING_RMAP + if (deactivate) + deactivate_page(page); + else + mark_page_accessed(page); +#endif page_cache_release(page); if (status < 0) @@ -906,6 +1023,10 @@ err = written ? written : status; out: + /* Short writes give back address space */ + if (inode->i_size != maxpos) + vm_unacct_memory(VM_ACCT(maxpos) - VM_ACCT(inode->i_size)); +out_nc: up(&inode->i_sem); return err; fail_write: @@ -919,13 +1040,14 @@ struct inode *inode = filp->f_dentry->d_inode; struct address_space *mapping = inode->i_mapping; unsigned long index, offset; + int nr = 1; index = *ppos >> PAGE_CACHE_SHIFT; offset = *ppos & ~PAGE_CACHE_MASK; - for (;;) { + while (nr && desc->count) { struct page *page; - unsigned long end_index, nr, ret; + unsigned long end_index, nr; end_index = inode->i_size >> PAGE_CACHE_SHIFT; if (index > end_index) @@ -955,14 +1077,12 @@ * "pos" here (the actor routine has to update the user buffer * pointers and the remaining count). 
*/ - ret = file_read_actor(desc, page, offset, nr); - offset += ret; + nr = file_read_actor(desc, page, offset, nr); + offset += nr; index += offset >> PAGE_CACHE_SHIFT; offset &= ~PAGE_CACHE_MASK; page_cache_release(page); - if (ret != nr || !desc->count) - break; } *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; @@ -1179,10 +1299,15 @@ memcpy(info, symname, len); inode->i_op = &shmem_symlink_inline_operations; } else { + if (!vm_enough_memory(VM_ACCT(1))) { + iput(inode); + return -ENOMEM; + } down(&info->sem); page = shmem_getpage_locked(info, inode, 0); if (IS_ERR(page)) { up(&info->sem); + vm_unacct_memory(VM_ACCT(1)); iput(inode); return PTR_ERR(page); } @@ -1385,6 +1510,7 @@ static struct address_space_operations shmem_aops = { + removepage: shmem_removepage, writepage: shmem_writepage, }; @@ -1399,6 +1525,7 @@ static struct inode_operations shmem_inode_operations = { truncate: shmem_truncate, + setattr: shmem_notify_change, }; static struct inode_operations shmem_dir_inode_operations = { @@ -1494,7 +1621,6 @@ struct inode * inode; struct dentry *dentry, *root; struct qstr this; - int vm_enough_memory(long pages); if (size > SHMEM_MAX_BYTES) return ERR_PTR(-EINVAL); @@ -1502,13 +1628,14 @@ if (!vm_enough_memory(VM_ACCT(size))) return ERR_PTR(-ENOMEM); + error = -ENOMEM; this.name = name; this.len = strlen(name); this.hash = 0; /* will go */ root = shm_mnt->mnt_root; dentry = d_alloc(root, &this); if (!dentry) - return ERR_PTR(-ENOMEM); + goto put_memory; error = -ENFILE; file = get_empty_filp(); @@ -1533,6 +1660,8 @@ put_filp(file); put_dentry: dput (dentry); +put_memory: + vm_unacct_memory(VM_ACCT(size)); return ERR_PTR(error); } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/mm/vmalloc.c linux.21pre4-ac2/mm/vmalloc.c --- linux.21pre4/mm/vmalloc.c 2003-01-29 16:26:33.000000000 +0000 +++ linux.21pre4-ac2/mm/vmalloc.c 2003-01-06 17:12:42.000000000 +0000 @@ -327,3 +327,22 @@ read_unlock(&vmlist_lock); return buf - buf_start; } + +void *vcalloc(unsigned long nmemb, unsigned long elem_size) +{ + unsigned long size; + void *addr; + + /* + * Check that we're not going to overflow. + */ + if (nmemb > (ULONG_MAX / elem_size)) + return NULL; + + size = nmemb * elem_size; + addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + + return addr; +} diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/net/bluetooth/bnep/core.c linux.21pre4-ac2/net/bluetooth/bnep/core.c --- linux.21pre4/net/bluetooth/bnep/core.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/net/bluetooth/bnep/core.c 2003-01-07 15:14:40.000000000 +0000 @@ -460,8 +460,6 @@ sigfillset(¤t->blocked); flush_signals(current); - current->nice = -15; - set_fs(KERNEL_DS); init_waitqueue_entry(&wait, current); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/net/core/skbuff.c linux.21pre4-ac2/net/core/skbuff.c --- linux.21pre4/net/core/skbuff.c 2003-01-29 16:26:39.000000000 +0000 +++ linux.21pre4-ac2/net/core/skbuff.c 2003-01-09 00:15:19.000000000 +0000 @@ -731,6 +731,36 @@ return n; } +/** + * skb_pad - zero pad the tail of an skb + * @skb: buffer to pad + * @pad: space to pad + * + * Ensure that a buffer is followed by a padding area that is zero + * filled. Used by network drivers which may DMA or transfer data + * beyond the buffer end onto the wire. + * + * May return NULL in out of memory cases. 
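Editorial aside: the vcalloc() helper added to mm/vmalloc.c above gives callers an overflow-checked, zeroed vmalloc. A minimal sketch of a caller, assuming the matching declaration the patch provides elsewhere; the structure and sizes are invented:

#include <linux/vmalloc.h>

/* Hypothetical: a table too large to kmalloc(). */
struct bucket {
	void *head;
	unsigned int count;
};

static struct bucket *buckets;

static int alloc_buckets(unsigned long nr)
{
	/* NULL if nr * sizeof(struct bucket) would overflow or if the
	 * underlying vmalloc() fails; the memory is already zeroed. */
	buckets = vcalloc(nr, sizeof(struct bucket));
	if (!buckets)
		return -ENOMEM;
	return 0;
}

/* ... and later: vfree(buckets); */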
+ */ + +struct sk_buff *skb_pad(struct sk_buff *skb, int pad) +{ + struct sk_buff *nskb; + + /* If the skbuff is non linear tailroom is always zero.. */ + if(skb_tailroom(skb) >= pad) + { + memset(skb->data+skb->len, 0, pad); + return skb; + } + + nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC); + kfree_skb(skb); + if(nskb) + memset(nskb->data+nskb->len, 0, pad); + return nskb; +} + /* Trims skb to length len. It can change skb pointers, if "realloc" is 1. * If realloc==0 and trimming is impossible without change of data, * it is BUG(). diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/net/ipv4/tcp_minisocks.c linux.21pre4-ac2/net/ipv4/tcp_minisocks.c --- linux.21pre4/net/ipv4/tcp_minisocks.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/net/ipv4/tcp_minisocks.c 2003-01-31 10:58:59.000000000 +0000 @@ -938,6 +938,12 @@ if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) goto embryonic_reset; + /* ACK sequence verified above, just make sure ACK is + * set. If ACK not set, just silently drop the packet. + */ + if (!(flg & TCP_FLAG_ACK)) + return NULL; + /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) { req->acked = 1; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/net/ipv4/tcp_output.c linux.21pre4-ac2/net/ipv4/tcp_output.c --- linux.21pre4/net/ipv4/tcp_output.c 2003-01-29 16:26:39.000000000 +0000 +++ linux.21pre4-ac2/net/ipv4/tcp_output.c 2003-01-30 16:54:01.000000000 +0000 @@ -718,13 +718,13 @@ /* Ok. We will be able to collapse the packet. */ __skb_unlink(next_skb, next_skb->list); + memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); + if (next_skb->ip_summed == CHECKSUM_HW) skb->ip_summed = CHECKSUM_HW; - if (skb->ip_summed != CHECKSUM_HW) { - memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); + if (skb->ip_summed != CHECKSUM_HW) skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); - } /* Update sequence range on original skb. 
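Editorial aside: skb_pad(), exported above for network drivers, either pads the buffer in place or replaces it with a padded copy, freeing the original and possibly returning NULL. A hedged sketch of a transmit routine using it; the driver names are invented:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical hard_start_xmit: pad runt frames to the 60-byte
 * Ethernet minimum so the hardware never transmits stale memory. */
static int toy_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->len < ETH_ZLEN) {
		skb = skb_pad(skb, ETH_ZLEN - skb->len);
		if (!skb)
			return 0;	/* original skb was already freed */
	}

	/* ... queue skb to the device and kick the transmitter ... */
	return 0;
}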
*/ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/net/netsyms.c linux.21pre4-ac2/net/netsyms.c --- linux.21pre4/net/netsyms.c 2003-01-29 17:07:59.000000000 +0000 +++ linux.21pre4-ac2/net/netsyms.c 2003-01-29 17:26:57.000000000 +0000 @@ -82,7 +82,7 @@ extern void destroy_8023_client(struct datalink_proto *); #endif -#ifdef CONFIG_ATALK_MODULE +#if defined(CONFIG_ATALK_MODULE) || defined(CONFIG_FILTER) #include #endif @@ -462,6 +462,7 @@ #endif /* CONFIG_INET */ #ifdef CONFIG_TR +EXPORT_SYMBOL(tr_source_route); EXPORT_SYMBOL(tr_type_trans); #endif @@ -480,6 +481,7 @@ EXPORT_SYMBOL(__dev_get_by_index); EXPORT_SYMBOL(dev_get_by_name); EXPORT_SYMBOL(__dev_get_by_name); +EXPORT_SYMBOL(dev_getbyhwaddr); EXPORT_SYMBOL(netdev_finish_unregister); EXPORT_SYMBOL(netdev_set_master); EXPORT_SYMBOL(eth_type_trans); @@ -493,6 +495,7 @@ EXPORT_SYMBOL(__kfree_skb); EXPORT_SYMBOL(skb_clone); EXPORT_SYMBOL(skb_copy); +EXPORT_SYMBOL(skb_pad); EXPORT_SYMBOL(netif_rx); EXPORT_SYMBOL(netif_receive_skb); EXPORT_SYMBOL(dev_add_pack); diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/Rules.make linux.21pre4-ac2/Rules.make --- linux.21pre4/Rules.make 2003-01-29 16:26:39.000000000 +0000 +++ linux.21pre4-ac2/Rules.make 2003-01-06 16:19:54.000000000 +0000 @@ -291,10 +291,6 @@ include .depend endif -ifneq ($(wildcard $(TOPDIR)/.hdepend),) -include $(TOPDIR)/.hdepend -endif - # # Find files whose flags have changed and force recompilation. # For safety, this works in the converse direction: diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/scripts/extract-ikconfig linux.21pre4-ac2/scripts/extract-ikconfig --- linux.21pre4/scripts/extract-ikconfig 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/scripts/extract-ikconfig 2003-01-06 16:19:54.000000000 +0000 @@ -0,0 +1,17 @@ +#! /bin/bash -x +# extracts .config info from a [b]zImage file +# uses: binoffset (new), dd, zcat, strings, grep +# $arg1 is [b]zImage filename + +HDR=`binoffset $1 0x1f 0x8b 0x08 0x0` +PID=$$ +TMPFILE="$1.vmlin.$PID" + +# dd if=$1 bs=1 skip=$HDR | zcat - | strings /dev/stdin \ +# | grep "[A-Za-z_0-9]=[ynm]$" | sed "s/^/CONFIG_/" > $1.oldconfig.$PID +# exit + +dd if=$1 bs=1 skip=$HDR | zcat - > $TMPFILE +strings $TMPFILE | grep "^[\#[:blank:]]*CONFIG_[A-Za-z_0-9]*" > $1.oldconfig.$PID +wc $1.oldconfig.$PID +rm $TMPFILE diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/scripts/include_deps linux.21pre4-ac2/scripts/include_deps --- linux.21pre4/scripts/include_deps 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/scripts/include_deps 2003-01-06 16:19:54.000000000 +0000 @@ -0,0 +1,15 @@ +# Read the .depend files, extract the dependencies for .h targets, convert +# relative names to absolute and write the result to stdout. It is part of +# building the global .h dependency graph for kbuild 2.4. 
KAO + +/^[^ ]/ { copy = 0; fn = "/error/"; } +/^[^ ][^ ]*\.h:/ { copy = 1; fn = FILENAME; sub(/\.depend/, "", fn); } +!copy { next; } + { + indent = $0; sub(/[^ ].*/, "", indent); + if ($1 != "" && $1 !~ /^[@$\/\\]/) { $1 = fn $1 }; + if ($2 != "" && $2 !~ /^[@$\/\\]/) { $2 = fn $2 }; + $1 = $1; # ensure $0 is rebuilt + $0 = indent $0; + print; + } diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/scripts/mkconfigs.c linux.21pre4-ac2/scripts/mkconfigs.c --- linux.21pre4/scripts/mkconfigs.c 1970-01-01 01:00:00.000000000 +0100 +++ linux.21pre4-ac2/scripts/mkconfigs.c 2003-01-06 16:19:54.000000000 +0000 @@ -0,0 +1,181 @@ +/*************************************************************************** + * mkconfigs.c + * (C) 2002 Randy Dunlap + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +# Rules for scripts/mkconfigs.c input.config output.c +# to generate configs.c from linux/.config: +# - drop lines that begin with '#' +# - drop blank lines +# - lines that use double-quotes must \\-escape-quote them + +################## skeleton configs.c file: #################### + +#include +#include + +static char *configs[] __initdata = + + , + +; + +################### end configs.c file ###################### + + * Changelog for ver. 0.2, 2002-02-15, rddunlap@osdl.org: + - strip leading "CONFIG_" from config option strings; + - use "static" and "__attribute__((unused))"; + - don't use EXPORT_SYMBOL(); + - separate each config line with \newline instead of space; + + * Changelog for ver. 0.3, 2002-02-18, rddunlap@osdl.org: + - keep all "not set" comment lines from .config so that 'make *config' + will be happy, but don't keep other comments; + - keep leading "CONFIG_" on each line; + +****************************************************************/ + +#include +#include +#include +#include + +#define VERSION "0.2" +#define LINE_SIZE 1000 + +int include_all_lines = 1; // whether to include "=n" lines in the output + +void usage (const char *progname) +{ + fprintf (stderr, "%s ver. 
%s\n", progname, VERSION); + fprintf (stderr, "usage: %s input.config.name path/configs.c\n", + progname); + exit (1); +} + +void make_intro (FILE *sourcefile) +{ + fprintf (sourcefile, "#include \n"); +///// fprintf (sourcefile, "#include \n"); + fprintf (sourcefile, "\n"); +///// fprintf (sourcefile, "char *configs[] __initdata = {\n"); + fprintf (sourcefile, "static char __attribute__ ((unused)) *configs[] __initdata = {\n"); + fprintf (sourcefile, " \"CONFIG_BEGIN=n\\n\" ,\n"); +} + +void make_ending (FILE *sourcefile) +{ + fprintf (sourcefile, " \"CONFIG_END=n\\n\"\n"); + fprintf (sourcefile, "};\n"); +///// fprintf (sourcefile, "EXPORT_SYMBOL (configs);\n"); +} + +void make_lines (FILE *configfile, FILE *sourcefile) +{ + char cfgline[LINE_SIZE]; + char *ch; + + while (fgets (cfgline, LINE_SIZE, configfile)) { + /* kill the trailing newline in cfgline */ + cfgline[strlen (cfgline) - 1] = '\0'; + + /* don't keep #-only line or an empty/blank line */ + if ((cfgline[0] == '#' && cfgline[1] == '\0') || + cfgline[0] == '\0') + continue; + + if (!include_all_lines && + cfgline[0] == '#') // strip out all comment lines + continue; + + /* really only want to keep lines that begin with + * "CONFIG_" or "# CONFIG_" */ + if (strncmp (cfgline, "CONFIG_", 7) && + strncmp (cfgline, "# CONFIG_", 9)) + continue; + + /* + * use strchr() to check for "-quote in cfgline; + * if not found, output the line, quoted; + * if found, output a char at a time, with \\-quote + * preceding double-quote chars + */ + if (!strchr (cfgline, '"')) { + fprintf (sourcefile, " \"%s\\n\" ,\n", cfgline); + continue; + } + + /* go to char-at-a-time mode for this config and + * precede any double-quote with a backslash */ + fprintf (sourcefile, " \""); /* lead-in */ + for (ch = cfgline; *ch; ch++) { + if (*ch == '"') + fputc ('\\', sourcefile); + fputc (*ch, sourcefile); + } + fprintf (sourcefile, "\\n\" ,\n"); + } +} + +int make_configs (FILE *configfile, FILE *sourcefile) +{ + make_intro (sourcefile); + make_lines (configfile, sourcefile); + make_ending (sourcefile); +} + +int main (int argc, char *argv[]) +{ + char *progname = argv[0]; + char *configname, *sourcename; + FILE *configfile, *sourcefile; + + if (argc != 3) + usage (progname); + + configname = argv[1]; + sourcename = argv[2]; + + configfile = fopen (configname, "r"); + if (!configfile) { + fprintf (stderr, "%s: cannot open '%s'\n", + progname, configname); + exit (2); + } + sourcefile = fopen (sourcename, "w"); + if (!sourcefile) { + fprintf (stderr, "%s: cannot open '%s'\n", + progname, sourcename); + exit (2); + } + + make_configs (configfile, sourcefile); + + if (fclose (sourcefile)) { + fprintf (stderr, "%s: error %d closing '%s'\n", + progname, errno, sourcename); + exit (3); + } + if (fclose (configfile)) { + fprintf (stderr, "%s: error %d closing '%s'\n", + progname, errno, configname); + exit (3); + } + + exit (0); +} + +/* end mkconfigs.c */ diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/scripts/mkdep.c linux.21pre4-ac2/scripts/mkdep.c --- linux.21pre4/scripts/mkdep.c 2003-01-29 16:27:21.000000000 +0000 +++ linux.21pre4-ac2/scripts/mkdep.c 2003-01-06 16:19:54.000000000 +0000 @@ -48,6 +48,8 @@ char __depname[512] = "\n\t@touch "; #define depname (__depname+9) int hasdep; +char cwd[PATH_MAX]; +int lcwd; struct path_struct { int len; @@ -202,8 +204,22 @@ memcpy(path->buffer+path->len, name, len); path->buffer[path->len+len] = '\0'; if (access(path->buffer, F_OK) == 0) { + int l = lcwd + strlen(path->buffer); + char name2[l+2], 
diff -u --exclude-from /usr/src/exclude --new-file --recursive linux.21pre4/scripts/tkgen.c linux.21pre4-ac2/scripts/tkgen.c
--- linux.21pre4/scripts/tkgen.c	2003-01-29 16:27:21.000000000 +0000
+++ linux.21pre4-ac2/scripts/tkgen.c	2003-01-06 16:19:54.000000000 +0000
@@ -625,6 +625,7 @@
 	    if ( ! vartable[i].global_written )
 	    {
 		global( vartable[i].name );
+		vartable[i].global_written = 1;
 	    }
 	    printf( "\t" );
 	}
@@ -699,6 +700,19 @@
     }
 
     /*
+     * Generate global declarations for the dependency chain (e.g. CONSTANT_M).
+     */
+    for ( tmp = cfg->depend; tmp; tmp = tmp->next )
+    {
+	int i = get_varnum( tmp->name );
+	if ( ! vartable[i].global_written )
+	{
+	    global( vartable[i].name );
+	    vartable[i].global_written = 1;
+	}
+    }
+
+    /*
      * Generate indentation.
      */
     printf( "\t" );